Update clang-tools to ab/8980388

This commit includes:
* aosp/2176545 - Change the format of header-abi-diff config from .ini to .json
* aosp/2182596 - Implement the Cross-Version ABI diff configuration
* aosp/2190085 - Fix header-abi-diff skipping the diff when config.json is not found

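With the .ini-to-.json change (aosp/2176545) and the cross-version diff
configuration (aosp/2182596), header-abi-diff now reads its per-library
diff settings from a config.json. As a rough, hypothetical sketch only --
the key names below are illustrative assumptions, not the verified schema:

  {
    "global": {
      "flags": { "allow_adding_removing_weak_symbols": true }
    },
    "libfoo": [
      { "target_version": "current", "flags": { "check_all_apis": true } }
    ]
  }
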
Bug: 239792343
Test: Presubmit only
Change-Id: I32460bab794e5197c4406552c799a5e93671f3aa
diff --git a/darwin-x86/bin/versioner b/darwin-x86/bin/versioner
index be05183..d19d055 100755
--- a/darwin-x86/bin/versioner
+++ b/darwin-x86/bin/versioner
Binary files differ
diff --git a/darwin-x86/clang-headers b/darwin-x86/clang-headers
index fabd016..161b83d 120000
--- a/darwin-x86/clang-headers
+++ b/darwin-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/14.0.2/include
\ No newline at end of file
+lib64/clang/14.0.6/include
\ No newline at end of file
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_builtin_vars.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_complex_builtins.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_device_functions.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
index 538556f..e447590 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
@@ -345,4 +345,4 @@
 #pragma pop_macro("__DEVICE_VOID__")
 #pragma pop_macro("__FAST_OR_SLOW")
 
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_math_forward_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_runtime_wrapper.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_texture_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_cuda_texture_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_hip_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_hip_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_hip_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_hip_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_math.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_hip_math.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_math.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_hip_math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/darwin-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
similarity index 80%
rename from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
rename to darwin-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
index 73021d2..10cec58 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
@@ -50,6 +50,9 @@
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
 #else
 typedef __SIZE_TYPE__ size_t;
 // Define macros which are needed to declare HIP device API's without standard
@@ -74,25 +77,35 @@
 extern "C" {
 #endif //__cplusplus
 
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+  return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#else  // HIP version check
 #if __HIP_ENABLE_DEVICE_MALLOC__
 __device__ void *__hip_malloc(__hip_size_t __size);
 __device__ void *__hip_free(void *__ptr);
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   return __hip_malloc(__size);
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __hip_free(__ptr);
 }
 #else
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   __builtin_trap();
   return (void *)0;
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
   __builtin_trap();
-  return (void *)0;
 }
 #endif
+#endif // HIP version check
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h b/darwin-x86/lib64/clang/14.0.6/include/__stddef_max_align_t.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h
rename to darwin-x86/lib64/clang/14.0.6/include/__stddef_max_align_t.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h b/darwin-x86/lib64/clang/14.0.6/include/__wmmintrin_aes.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h
rename to darwin-x86/lib64/clang/14.0.6/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h b/darwin-x86/lib64/clang/14.0.6/include/__wmmintrin_pclmul.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
rename to darwin-x86/lib64/clang/14.0.6/include/__wmmintrin_pclmul.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/adxintrin.h b/darwin-x86/lib64/clang/14.0.6/include/adxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/adxintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/altivec.h b/darwin-x86/lib64/clang/14.0.6/include/altivec.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/altivec.h
rename to darwin-x86/lib64/clang/14.0.6/include/altivec.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ammintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ammintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ammintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/amxintrin.h b/darwin-x86/lib64/clang/14.0.6/include/amxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/amxintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/amxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm64intr.h b/darwin-x86/lib64/clang/14.0.6/include/arm64intr.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm64intr.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_acle.h b/darwin-x86/lib64/clang/14.0.6/include/arm_acle.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_acle.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_acle.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_bf16.h b/darwin-x86/lib64/clang/14.0.6/include/arm_bf16.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_bf16.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_bf16.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_cde.h b/darwin-x86/lib64/clang/14.0.6/include/arm_cde.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_cde.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_cde.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_cmse.h b/darwin-x86/lib64/clang/14.0.6/include/arm_cmse.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_cmse.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_cmse.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h b/darwin-x86/lib64/clang/14.0.6/include/arm_fp16.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_fp16.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_fp16.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_mve.h b/darwin-x86/lib64/clang/14.0.6/include/arm_mve.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_mve.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h b/darwin-x86/lib64/clang/14.0.6/include/arm_neon.h
similarity index 90%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_neon.h
index 2448870..76fd7c0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/arm_neon.h
@@ -19090,11 +19090,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19129,11 +19124,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19224,11 +19214,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -19251,11 +19236,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21675,11 +21655,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21714,11 +21689,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21809,11 +21779,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -21836,11 +21801,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -47053,7 +47013,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
   int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
   return __ret;
 }
 #else
@@ -47062,16 +47022,21 @@
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
   int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
   return __ret;
 }
 #else
@@ -47080,16 +47045,21 @@
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
   int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
   return __ret;
 }
 #else
@@ -47098,16 +47068,21 @@
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
   int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
+__ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
   int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
   return __ret;
 }
 #else
@@ -47116,10 +47091,15 @@
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -47128,7 +47108,7 @@
   int32x4_t __s1_309 = __p1_309; \
   int32x2_t __s2_309 = __p2_309; \
   int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
+  __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \
   __ret_309; \
 })
 #else
@@ -47140,7 +47120,7 @@
   int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
   int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
   int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
+  __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \
   __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
   __ret_310; \
 })
@@ -47152,7 +47132,7 @@
   int16x8_t __s1_311 = __p1_311; \
   int16x4_t __s2_311 = __p2_311; \
   int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
+  __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \
   __ret_311; \
 })
 #else
@@ -47164,7 +47144,7 @@
   int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
   int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
+  __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \
   __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_312; \
 })
@@ -47176,7 +47156,7 @@
   int32x2_t __s1_313 = __p1_313; \
   int32x2_t __s2_313 = __p2_313; \
   int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
+  __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \
   __ret_313; \
 })
 #else
@@ -47188,7 +47168,7 @@
   int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
   int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
   int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
+  __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \
   __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
   __ret_314; \
 })
@@ -47200,7 +47180,7 @@
   int16x4_t __s1_315 = __p1_315; \
   int16x4_t __s2_315 = __p2_315; \
   int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
+  __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \
   __ret_315; \
 })
 #else
@@ -47212,7 +47192,7 @@
   int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
   int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
   int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
+  __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \
   __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
   __ret_316; \
 })
@@ -47221,7 +47201,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
   int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
   return __ret;
 }
 #else
@@ -47230,16 +47210,21 @@
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
   int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
   return __ret;
 }
 #else
@@ -47248,16 +47233,21 @@
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
   int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
   return __ret;
 }
 #else
@@ -47266,16 +47256,21 @@
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
   int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
+__ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
   int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
   return __ret;
 }
 #else
@@ -47284,10 +47279,15 @@
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -47296,7 +47296,7 @@
   int32x4_t __s1_317 = __p1_317; \
   int32x2_t __s2_317 = __p2_317; \
   int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
+  __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \
   __ret_317; \
 })
 #else
@@ -47308,7 +47308,7 @@
   int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
   int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
   int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
+  __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \
   __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
   __ret_318; \
 })
@@ -47320,7 +47320,7 @@
   int16x8_t __s1_319 = __p1_319; \
   int16x4_t __s2_319 = __p2_319; \
   int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
+  __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \
   __ret_319; \
 })
 #else
@@ -47332,7 +47332,7 @@
   int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
   int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
+  __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \
   __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_320; \
 })
@@ -47344,7 +47344,7 @@
   int32x2_t __s1_321 = __p1_321; \
   int32x2_t __s2_321 = __p2_321; \
   int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
+  __ret_321 = vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \
   __ret_321; \
 })
 #else
@@ -47356,7 +47356,7 @@
   int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
   int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
   int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
+  __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \
   __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
   __ret_322; \
 })
@@ -47368,7 +47368,7 @@
   int16x4_t __s1_323 = __p1_323; \
   int16x4_t __s2_323 = __p2_323; \
   int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
+  __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \
   __ret_323; \
 })
 #else
@@ -47380,7 +47380,7 @@
   int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
   int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
   int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
+  __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \
   __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
   __ret_324; \
 })
@@ -47388,113 +47388,111 @@
 
 #endif
 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
+#define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int32_t __s0_325 = __p0_325; \
+  int32_t __s1_325 = __p1_325; \
+  int32x2_t __s2_325 = __p2_325; \
+  int32_t __ret_325; \
+  __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \
   __ret_325; \
 })
 #else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
+#define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int32_t __s0_326 = __p0_326; \
+  int32_t __s1_326 = __p1_326; \
+  int32x2_t __s2_326 = __p2_326; \
+  int32x2_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \
+  int32_t __ret_326; \
+  __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \
   __ret_326; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
+#define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  int16_t __s0_327 = __p0_327; \
+  int16_t __s1_327 = __p1_327; \
+  int16x4_t __s2_327 = __p2_327; \
+  int16_t __ret_327; \
+  __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \
   __ret_327; \
 })
 #else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  int16_t __s0_328 = __p0_328; \
+  int16_t __s1_328 = __p1_328; \
+  int16x4_t __s2_328 = __p2_328; \
+  int16x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  int16_t __ret_328; \
+  __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \
   __ret_328; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
+#define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32_t __s0_329 = __p0_329; \
+  int32_t __s1_329 = __p1_329; \
   int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
+  int32_t __ret_329; \
+  __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \
   __ret_329; \
 })
 #else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
+#define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32_t __s0_330 = __p0_330; \
+  int32_t __s1_330 = __p1_330; \
   int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
   int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
+  int32_t __ret_330; \
+  __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \
   __ret_330; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
+#define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int16_t __s0_331 = __p0_331; \
+  int16_t __s1_331 = __p1_331; \
   int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
+  int16_t __ret_331; \
+  __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \
   __ret_331; \
 })
 #else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
+#define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int16_t __s0_332 = __p0_332; \
+  int16_t __s1_332 = __p1_332; \
   int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
   int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
+  int16_t __ret_332; \
+  __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \
   __ret_332; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+#define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
   int32x4_t __s0_333 = __p0_333; \
   int32x4_t __s1_333 = __p1_333; \
   int32x4_t __s2_333 = __p2_333; \
   int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
+  __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \
   __ret_333; \
 })
 #else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+#define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
   int32x4_t __s0_334 = __p0_334; \
   int32x4_t __s1_334 = __p1_334; \
   int32x4_t __s2_334 = __p2_334; \
@@ -47502,23 +47500,23 @@
   int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
   int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
   int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
+  __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \
   __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
   __ret_334; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
+#define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
   int16x8_t __s0_335 = __p0_335; \
   int16x8_t __s1_335 = __p1_335; \
   int16x8_t __s2_335 = __p2_335; \
   int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
+  __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \
   __ret_335; \
 })
 #else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
+#define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
   int16x8_t __s0_336 = __p0_336; \
   int16x8_t __s1_336 = __p1_336; \
   int16x8_t __s2_336 = __p2_336; \
@@ -47526,23 +47524,23 @@
   int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
+  __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \
   __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_336; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
+#define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
   int32x2_t __s0_337 = __p0_337; \
   int32x2_t __s1_337 = __p1_337; \
   int32x4_t __s2_337 = __p2_337; \
   int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
+  __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \
   __ret_337; \
 })
 #else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
+#define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
   int32x2_t __s0_338 = __p0_338; \
   int32x2_t __s1_338 = __p1_338; \
   int32x4_t __s2_338 = __p2_338; \
@@ -47550,23 +47548,23 @@
   int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
   int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
   int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
+  __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \
   __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
   __ret_338; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
+#define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
   int16x4_t __s0_339 = __p0_339; \
   int16x4_t __s1_339 = __p1_339; \
   int16x8_t __s2_339 = __p2_339; \
   int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
+  __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \
   __ret_339; \
 })
 #else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
+#define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
   int16x4_t __s0_340 = __p0_340; \
   int16x4_t __s1_340 = __p1_340; \
   int16x8_t __s2_340 = __p2_340; \
@@ -47574,12 +47572,202 @@
   int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
   int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
+  __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \
   __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
   __ret_340; \
 })
 #endif
 
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
+  int32_t __s0_341 = __p0_341; \
+  int32_t __s1_341 = __p1_341; \
+  int32x2_t __s2_341 = __p2_341; \
+  int32_t __ret_341; \
+  __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \
+  __ret_341; \
+})
+#else
+#define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
+  int32_t __s0_342 = __p0_342; \
+  int32_t __s1_342 = __p1_342; \
+  int32x2_t __s2_342 = __p2_342; \
+  int32x2_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \
+  int32_t __ret_342; \
+  __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \
+  __ret_342; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
+  int16_t __s0_343 = __p0_343; \
+  int16_t __s1_343 = __p1_343; \
+  int16x4_t __s2_343 = __p2_343; \
+  int16_t __ret_343; \
+  __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \
+  __ret_343; \
+})
+#else
+#define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
+  int16_t __s0_344 = __p0_344; \
+  int16_t __s1_344 = __p1_344; \
+  int16x4_t __s2_344 = __p2_344; \
+  int16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
+  int16_t __ret_344; \
+  __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \
+  __ret_344; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
+  int32_t __s0_345 = __p0_345; \
+  int32_t __s1_345 = __p1_345; \
+  int32x4_t __s2_345 = __p2_345; \
+  int32_t __ret_345; \
+  __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \
+  __ret_345; \
+})
+#else
+#define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
+  int32_t __s0_346 = __p0_346; \
+  int32_t __s1_346 = __p1_346; \
+  int32x4_t __s2_346 = __p2_346; \
+  int32x4_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \
+  int32_t __ret_346; \
+  __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \
+  __ret_346; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
+  int16_t __s0_347 = __p0_347; \
+  int16_t __s1_347 = __p1_347; \
+  int16x8_t __s2_347 = __p2_347; \
+  int16_t __ret_347; \
+  __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \
+  __ret_347; \
+})
+#else
+#define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
+  int16_t __s0_348 = __p0_348; \
+  int16_t __s1_348 = __p1_348; \
+  int16x8_t __s2_348 = __p2_348; \
+  int16x8_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_348; \
+  __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \
+  __ret_348; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
+  int32x4_t __s0_349 = __p0_349; \
+  int32x4_t __s1_349 = __p1_349; \
+  int32x4_t __s2_349 = __p2_349; \
+  int32x4_t __ret_349; \
+  __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \
+  __ret_349; \
+})
+#else
+#define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
+  int32x4_t __s0_350 = __p0_350; \
+  int32x4_t __s1_350 = __p1_350; \
+  int32x4_t __s2_350 = __p2_350; \
+  int32x4_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \
+  int32x4_t __rev1_350;  __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \
+  int32x4_t __rev2_350;  __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \
+  int32x4_t __ret_350; \
+  __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, __noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
+  int16x8_t __s0_351 = __p0_351; \
+  int16x8_t __s1_351 = __p1_351; \
+  int16x8_t __s2_351 = __p2_351; \
+  int16x8_t __ret_351; \
+  __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \
+  __ret_351; \
+})
+#else
+#define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
+  int16x8_t __s0_352 = __p0_352; \
+  int16x8_t __s1_352 = __p1_352; \
+  int16x8_t __s2_352 = __p2_352; \
+  int16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_352;  __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_352; \
+  __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
+  int32x2_t __s0_353 = __p0_353; \
+  int32x2_t __s1_353 = __p1_353; \
+  int32x4_t __s2_353 = __p2_353; \
+  int32x2_t __ret_353; \
+  __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \
+  __ret_353; \
+})
+#else
+#define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
+  int32x2_t __s0_354 = __p0_354; \
+  int32x2_t __s1_354 = __p1_354; \
+  int32x4_t __s2_354 = __p2_354; \
+  int32x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  int32x2_t __rev1_354;  __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \
+  int32x4_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \
+  int32x2_t __ret_354; \
+  __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
+  int16x4_t __s0_355 = __p0_355; \
+  int16x4_t __s1_355 = __p1_355; \
+  int16x8_t __s2_355 = __p2_355; \
+  int16x4_t __ret_355; \
+  __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \
+  __ret_355; \
+})
+#else
+#define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
+  int16x4_t __s0_356 = __p0_356; \
+  int16x4_t __s1_356 = __p1_356; \
+  int16x8_t __s2_356 = __p2_356; \
+  int16x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
+  int16x4_t __rev1_356;  __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \
+  int16x8_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_356; \
+  __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
+  __ret_356; \
+})
+#endif
+
 #endif
 #if defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
@@ -49998,895 +50186,895 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
+#define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
+  poly8x16_t __s0_357 = __p0_357; \
+  poly8x8_t __s2_357 = __p2_357; \
+  poly8x16_t __ret_357; \
+  __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \
   __ret_357; \
 })
 #else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
+#define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
+  poly8x16_t __s0_358 = __p0_358; \
+  poly8x8_t __s2_358 = __p2_358; \
+  poly8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_358; \
+  __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_358; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
+#define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
+  poly16x8_t __s0_359 = __p0_359; \
+  poly16x4_t __s2_359 = __p2_359; \
+  poly16x8_t __ret_359; \
+  __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \
   __ret_359; \
 })
 #else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
+#define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
+  poly16x8_t __s0_360 = __p0_360; \
+  poly16x4_t __s2_360 = __p2_360; \
+  poly16x8_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_360;  __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \
+  poly16x8_t __ret_360; \
+  __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_360; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
+#define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
+  uint8x16_t __s0_361 = __p0_361; \
+  uint8x8_t __s2_361 = __p2_361; \
+  uint8x16_t __ret_361; \
+  __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \
   __ret_361; \
 })
 #else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
+  uint8x16_t __s0_362 = __p0_362; \
+  uint8x8_t __s2_362 = __p2_362; \
+  uint8x16_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_362; \
+  __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_362; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
+#define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
+  uint32x4_t __s0_363 = __p0_363; \
+  uint32x2_t __s2_363 = __p2_363; \
+  uint32x4_t __ret_363; \
+  __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \
   __ret_363; \
 })
 #else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
+  uint32x4_t __s0_364 = __p0_364; \
+  uint32x2_t __s2_364 = __p2_364; \
+  uint32x4_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \
+  uint32x2_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \
+  uint32x4_t __ret_364; \
+  __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \
   __ret_364; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
+#define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
+  uint64x2_t __s0_365 = __p0_365; \
+  uint64x1_t __s2_365 = __p2_365; \
+  uint64x2_t __ret_365; \
+  __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \
   __ret_365; \
 })
 #else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
+#define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
+  uint64x2_t __s0_366 = __p0_366; \
+  uint64x1_t __s2_366 = __p2_366; \
+  uint64x2_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \
+  uint64x2_t __ret_366; \
+  __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \
   __ret_366; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
+#define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
+  uint16x8_t __s0_367 = __p0_367; \
+  uint16x4_t __s2_367 = __p2_367; \
+  uint16x8_t __ret_367; \
+  __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \
   __ret_367; \
 })
 #else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
+#define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
+  uint16x8_t __s0_368 = __p0_368; \
+  uint16x4_t __s2_368 = __p2_368; \
+  uint16x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \
+  uint16x8_t __ret_368; \
+  __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \
   __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_368; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
+#define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
+  int8x16_t __s0_369 = __p0_369; \
+  int8x8_t __s2_369 = __p2_369; \
+  int8x16_t __ret_369; \
+  __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \
   __ret_369; \
 })
 #else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
+#define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
+  int8x16_t __s0_370 = __p0_370; \
+  int8x8_t __s2_370 = __p2_370; \
+  int8x16_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_370; \
+  __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_370; \
 })
 #endif
 
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
+  float32x4_t __s0_371 = __p0_371; \
+  float32x2_t __s2_371 = __p2_371; \
+  float32x4_t __ret_371; \
+  __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \
   __ret_371; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
+#else
+#define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
+  float32x4_t __s0_372 = __p0_372; \
+  float32x2_t __s2_372 = __p2_372; \
+  float32x4_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \
+  float32x2_t __rev2_372;  __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \
+  float32x4_t __ret_372; \
+  __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \
   __ret_372; \
 })
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x2_t __s2_373 = __p2_373; \
+  int32x4_t __ret_373; \
+  __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \
   __ret_373; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
+#else
+#define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x2_t __s2_374 = __p2_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x2_t __rev2_374;  __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \
+  int32x4_t __ret_374; \
+  __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
   __ret_374; \
 })
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x1_t __s2_375 = __p2_375; \
+  int64x2_t __ret_375; \
+  __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \
   __ret_375; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
+#else
+#define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x1_t __s2_376 = __p2_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  int64x2_t __ret_376; \
+  __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
   __ret_376; \
 })
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x4_t __s2_377 = __p2_377; \
+  int16x8_t __ret_377; \
+  __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \
   __ret_377; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
+#else
+#define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x4_t __s2_378 = __p2_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_378;  __rev2_378 = __builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \
+  int16x8_t __ret_378; \
+  __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_378; \
 })
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
+  poly8x8_t __s0_379 = __p0_379; \
+  poly8x8_t __s2_379 = __p2_379; \
+  poly8x8_t __ret_379; \
+  __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \
   __ret_379; \
 })
+#else
+#define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
+  poly8x8_t __s0_380 = __p0_380; \
+  poly8x8_t __s2_380 = __p2_380; \
+  poly8x8_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_380;  __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_380; \
+  __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
+})
 #endif
 
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
+#define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
+  poly16x4_t __s0_381 = __p0_381; \
+  poly16x4_t __s2_381 = __p2_381; \
+  poly16x4_t __ret_381; \
+  __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \
   __ret_381; \
 })
 #else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
+#define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
+  poly16x4_t __s0_382 = __p0_382; \
+  poly16x4_t __s2_382 = __p2_382; \
+  poly16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  poly16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
+  poly16x4_t __ret_382; \
+  __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
   __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
   __ret_382; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
+#define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
+  uint8x8_t __s0_383 = __p0_383; \
+  uint8x8_t __s2_383 = __p2_383; \
+  uint8x8_t __ret_383; \
+  __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \
   __ret_383; \
 })
 #else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
+  uint8x8_t __s0_384 = __p0_384; \
+  uint8x8_t __s2_384 = __p2_384; \
+  uint8x8_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_384; \
+  __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
+  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_384; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
+#define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
+  uint32x2_t __s0_385 = __p0_385; \
+  uint32x2_t __s2_385 = __p2_385; \
+  uint32x2_t __ret_385; \
+  __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \
   __ret_385; \
 })
 #else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
+  uint32x2_t __s0_386 = __p0_386; \
+  uint32x2_t __s2_386 = __p2_386; \
+  uint32x2_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \
+  uint32x2_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 1, 0); \
+  uint32x2_t __ret_386; \
+  __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \
   __ret_386; \
 })
 #endif
 
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
+#define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
+  uint64x1_t __s0_387 = __p0_387; \
+  uint64x1_t __s2_387 = __p2_387; \
+  uint64x1_t __ret_387; \
+  __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \
   __ret_387; \
 })
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
+  uint16x4_t __s0_388 = __p0_388; \
+  uint16x4_t __s2_388 = __p2_388; \
+  uint16x4_t __ret_388; \
+  __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \
   __ret_388; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
+#else
+#define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
+  uint16x4_t __s0_389 = __p0_389; \
+  uint16x4_t __s2_389 = __p2_389; \
+  uint16x4_t __rev0_389;  __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \
+  uint16x4_t __rev2_389;  __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \
+  uint16x4_t __ret_389; \
+  __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \
+  __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 2, 1, 0); \
   __ret_389; \
 })
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
+  int8x8_t __s0_390 = __p0_390; \
+  int8x8_t __s2_390 = __p2_390; \
+  int8x8_t __ret_390; \
+  __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \
   __ret_390; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
+#else
+#define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
+  int8x8_t __s0_391 = __p0_391; \
+  int8x8_t __s2_391 = __p2_391; \
+  int8x8_t __rev0_391;  __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_391;  __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_391; \
+  __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \
+  __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_391; \
 })
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
+  float32x2_t __s0_392 = __p0_392; \
+  float32x2_t __s2_392 = __p2_392; \
+  float32x2_t __ret_392; \
+  __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \
   __ret_392; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
+#else
+#define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
+  float32x2_t __s0_393 = __p0_393; \
+  float32x2_t __s2_393 = __p2_393; \
+  float32x2_t __rev0_393;  __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \
+  float32x2_t __rev2_393;  __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \
+  float32x2_t __ret_393; \
+  __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \
+  __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \
   __ret_393; \
 })
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
+  int32x2_t __s0_394 = __p0_394; \
+  int32x2_t __s2_394 = __p2_394; \
+  int32x2_t __ret_394; \
+  __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \
   __ret_394; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
+#else
+#define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
+  int32x2_t __s0_395 = __p0_395; \
+  int32x2_t __s2_395 = __p2_395; \
+  int32x2_t __rev0_395;  __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \
+  int32x2_t __rev2_395;  __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \
+  int32x2_t __ret_395; \
+  __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \
+  __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \
   __ret_395; \
 })
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
 #endif
 
+#define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
+  int64x1_t __s0_396 = __p0_396; \
+  int64x1_t __s2_396 = __p2_396; \
+  int64x1_t __ret_396; \
+  __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \
+  __ret_396; \
+})
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
+#define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
+  int16x4_t __s0_397 = __p0_397; \
+  int16x4_t __s2_397 = __p2_397; \
+  int16x4_t __ret_397; \
+  __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \
   __ret_397; \
 })
 #else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
+#define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
+  int16x4_t __s0_398 = __p0_398; \
+  int16x4_t __s2_398 = __p2_398; \
+  int16x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
+  int16x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
+  int16x4_t __ret_398; \
+  __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \
   __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
   __ret_398; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
+#define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
+  poly8x16_t __s0_399 = __p0_399; \
+  poly8x16_t __s2_399 = __p2_399; \
+  poly8x16_t __ret_399; \
+  __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \
   __ret_399; \
 })
 #else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
+#define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
+  poly8x16_t __s0_400 = __p0_400; \
+  poly8x16_t __s2_400 = __p2_400; \
+  poly8x16_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_400; \
+  __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_400; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
+#define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  poly16x8_t __s0_401 = __p0_401; \
+  poly16x8_t __s2_401 = __p2_401; \
+  poly16x8_t __ret_401; \
+  __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \
   __ret_401; \
 })
 #else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
+#define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  poly16x8_t __s0_402 = __p0_402; \
+  poly16x8_t __s2_402 = __p2_402; \
+  poly16x8_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_402; \
+  __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \
+  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_402; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
+#define vcopyq_laneq_u8(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  uint8x16_t __s0_403 = __p0_403; \
+  uint8x16_t __s2_403 = __p2_403; \
+  uint8x16_t __ret_403; \
+  __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \
   __ret_403; \
 })
 #else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  uint8x16_t __s0_404 = __p0_404; \
+  uint8x16_t __s2_404 = __p2_404; \
+  uint8x16_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_404; \
+  __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \
+  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_404; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
+#define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  uint32x4_t __s0_405 = __p0_405; \
+  uint32x4_t __s2_405 = __p2_405; \
+  uint32x4_t __ret_405; \
+  __ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \
   __ret_405; \
 })
 #else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  uint32x4_t __s0_406 = __p0_406; \
+  uint32x4_t __s2_406 = __p2_406; \
+  uint32x4_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \
+  uint32x4_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \
+  uint32x4_t __ret_406; \
+  __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \
+  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \
   __ret_406; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
+#define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  uint64x2_t __s0_407 = __p0_407; \
+  uint64x2_t __s2_407 = __p2_407; \
+  uint64x2_t __ret_407; \
+  __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \
   __ret_407; \
 })
 #else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
+#define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  uint64x2_t __s0_408 = __p0_408; \
+  uint64x2_t __s2_408 = __p2_408; \
+  uint64x2_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \
+  uint64x2_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \
+  uint64x2_t __ret_408; \
+  __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \
+  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \
   __ret_408; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
+#define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  uint16x8_t __s0_409 = __p0_409; \
+  uint16x8_t __s2_409 = __p2_409; \
+  uint16x8_t __ret_409; \
+  __ret_409 = vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \
   __ret_409; \
 })
 #else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
+#define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  uint16x8_t __s0_410 = __p0_410; \
+  uint16x8_t __s2_410 = __p2_410; \
+  uint16x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_410; \
+  __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \
   __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_410; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
+#define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  int8x16_t __s0_411 = __p0_411; \
+  int8x16_t __s2_411 = __p2_411; \
+  int8x16_t __ret_411; \
+  __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \
   __ret_411; \
 })
 #else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
+#define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  int8x16_t __s0_412 = __p0_412; \
+  int8x16_t __s2_412 = __p2_412; \
+  int8x16_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_412; \
+  __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \
+  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_412; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
+#define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32x4_t __s0_413 = __p0_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  float32x4_t __ret_413; \
+  __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \
   __ret_413; \
 })
 #else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
+#define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32x4_t __s0_414 = __p0_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev0_414;  __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  float32x4_t __ret_414; \
+  __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \
+  __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \
   __ret_414; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
+#define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  int32x4_t __s0_415 = __p0_415; \
+  int32x4_t __s2_415 = __p2_415; \
+  int32x4_t __ret_415; \
+  __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \
   __ret_415; \
 })
 #else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
+#define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  int32x4_t __s0_416 = __p0_416; \
+  int32x4_t __s2_416 = __p2_416; \
+  int32x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
+  int32x4_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \
+  int32x4_t __ret_416; \
+  __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \
   __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
   __ret_416; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
+#define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  int64x2_t __s0_417 = __p0_417; \
+  int64x2_t __s2_417 = __p2_417; \
+  int64x2_t __ret_417; \
+  __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \
   __ret_417; \
 })
 #else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  int64x2_t __s0_418 = __p0_418; \
+  int64x2_t __s2_418 = __p2_418; \
+  int64x2_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \
+  int64x2_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \
+  int64x2_t __ret_418; \
+  __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \
   __ret_418; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
+#define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  int16x8_t __s0_419 = __p0_419; \
+  int16x8_t __s2_419 = __p2_419; \
+  int16x8_t __ret_419; \
+  __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \
   __ret_419; \
 })
 #else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
+#define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  int16x8_t __s0_420 = __p0_420; \
+  int16x8_t __s2_420 = __p2_420; \
+  int16x8_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_420; \
+  __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \
+  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_420; \
 })
 #endif
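The vcopyq_laneq_* forms above copy a single lane between two 128-bit vectors; in the little-endian case they reduce to vsetq_lane(vgetq_lane(src, srclane), dst, dstlane). A hedged usage sketch (the function name is illustrative):

#include <arm_neon.h>

/* Copy lane 5 of src into lane 2 of dst; both lane indices must be
 * compile-time constants. */
static inline int16x8_t copy_q_lane_s16(int16x8_t dst, int16x8_t src) {
  return vcopyq_laneq_s16(dst, 2, src, 5);
}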
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
+#define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  poly8x8_t __s0_421 = __p0_421; \
+  poly8x16_t __s2_421 = __p2_421; \
+  poly8x8_t __ret_421; \
+  __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \
   __ret_421; \
 })
 #else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
+#define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  poly8x8_t __s0_422 = __p0_422; \
+  poly8x16_t __s2_422 = __p2_422; \
+  poly8x8_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_422; \
+  __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_422; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
+#define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  poly16x4_t __s0_423 = __p0_423; \
+  poly16x8_t __s2_423 = __p2_423; \
+  poly16x4_t __ret_423; \
+  __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \
   __ret_423; \
 })
 #else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
+#define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  poly16x4_t __s0_424 = __p0_424; \
+  poly16x8_t __s2_424 = __p2_424; \
+  poly16x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  poly16x8_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_424; \
+  __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
   __ret_424; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
+#define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint8x8_t __s0_425 = __p0_425; \
+  uint8x16_t __s2_425 = __p2_425; \
+  uint8x8_t __ret_425; \
+  __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \
   __ret_425; \
 })
 #else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
+#define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint8x8_t __s0_426 = __p0_426; \
+  uint8x16_t __s2_426 = __p2_426; \
+  uint8x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_426; \
+  __ret_426 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_426; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  uint32x2_t __s0_427 = __p0_427; \
+  uint32x4_t __s2_427 = __p2_427; \
+  uint32x2_t __ret_427; \
+  __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \
+  __ret_427; \
+})
+#else
+#define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  uint32x2_t __s0_428 = __p0_428; \
+  uint32x4_t __s2_428 = __p2_428; \
+  uint32x2_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \
+  uint32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  uint32x2_t __ret_428; \
+  __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \
+  __ret_428; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  uint64x1_t __s0_429 = __p0_429; \
+  uint64x2_t __s2_429 = __p2_429; \
+  uint64x1_t __ret_429; \
+  __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \
+  __ret_429; \
+})
+#else
+#define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  uint64x1_t __s0_430 = __p0_430; \
+  uint64x2_t __s2_430 = __p2_430; \
+  uint64x2_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \
+  uint64x1_t __ret_430; \
+  __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \
+  __ret_430; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  uint16x4_t __s0_431 = __p0_431; \
+  uint16x8_t __s2_431 = __p2_431; \
+  uint16x4_t __ret_431; \
+  __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \
+  __ret_431; \
+})
+#else
+#define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  uint16x4_t __s0_432 = __p0_432; \
+  uint16x8_t __s2_432 = __p2_432; \
+  uint16x4_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \
+  uint16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_432; \
+  __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \
+  __ret_432; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  int8x8_t __s0_433 = __p0_433; \
+  int8x16_t __s2_433 = __p2_433; \
+  int8x8_t __ret_433; \
+  __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \
+  __ret_433; \
+})
+#else
+#define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  int8x8_t __s0_434 = __p0_434; \
+  int8x16_t __s2_434 = __p2_434; \
+  int8x8_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_434; \
+  __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  float32x2_t __s0_435 = __p0_435; \
+  float32x4_t __s2_435 = __p2_435; \
+  float32x2_t __ret_435; \
+  __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \
+  __ret_435; \
+})
+#else
+#define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  float32x2_t __s0_436 = __p0_436; \
+  float32x4_t __s2_436 = __p2_436; \
+  float32x2_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \
+  float32x4_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \
+  float32x2_t __ret_436; \
+  __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \
+  __ret_436; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  int32x2_t __s0_437 = __p0_437; \
+  int32x4_t __s2_437 = __p2_437; \
+  int32x2_t __ret_437; \
+  __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \
+  __ret_437; \
+})
+#else
+#define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  int32x2_t __s0_438 = __p0_438; \
+  int32x4_t __s2_438 = __p2_438; \
+  int32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  int32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  int32x2_t __ret_438; \
+  __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int64x1_t __s0_439 = __p0_439; \
+  int64x2_t __s2_439 = __p2_439; \
+  int64x1_t __ret_439; \
+  __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \
+  __ret_439; \
+})
+#else
+#define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int64x1_t __s0_440 = __p0_440; \
+  int64x2_t __s2_440 = __p2_440; \
+  int64x2_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \
+  int64x1_t __ret_440; \
+  __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \
+  __ret_440; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  int16x4_t __ret_441; \
+  __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \
+  __ret_441; \
+})
+#else
+#define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_442; \
+  __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
+})
+#endif
+
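The vcopy_laneq_* group above is the narrowing counterpart: the source is a 128-bit vector and the destination a 64-bit one, so the big-endian variants use different shuffle masks for the two register widths. A hedged usage sketch (illustrative name):

#include <arm_neon.h>

/* Place the highest lane of a 128-bit source into lane 0 of a 64-bit
 * destination. */
static inline int32x2_t copy_hi_lane_s32(int32x2_t dst, int32x4_t src) {
  return vcopy_laneq_s32(dst, 0, src, 3);
}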
 #define vcreate_p64(__p0) __extension__ ({ \
   poly64x1_t __ret; \
   uint64_t __promote = __p0; \
@@ -51625,85 +51813,85 @@
 })
 #endif
 
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
+#define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \
+  poly64x1_t __s0_443 = __p0_443; \
+  poly64x1_t __ret_443; \
+  __ret_443 = splat_lane_p64(__s0_443, __p1_443); \
+  __ret_443; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
+#define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \
+  poly64x1_t __s0_444 = __p0_444; \
+  poly64x2_t __ret_444; \
+  __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \
+  __ret_444; \
 })
 #else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
+#define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \
+  poly64x1_t __s0_445 = __p0_445; \
+  poly64x2_t __ret_445; \
+  __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \
+  __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \
+  __ret_445; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
+#define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \
+  float64x1_t __s0_446 = __p0_446; \
+  float64x2_t __ret_446; \
+  __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \
+  __ret_446; \
 })
 #else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
+#define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \
+  float64x1_t __s0_447 = __p0_447; \
+  float64x2_t __ret_447; \
+  __ret_447 = __noswap_splatq_lane_f64(__s0_447, __p1_447); \
+  __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \
+  __ret_447; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
+#define vdupq_lane_f16(__p0_448, __p1_448) __extension__ ({ \
+  float16x4_t __s0_448 = __p0_448; \
+  float16x8_t __ret_448; \
+  __ret_448 = splatq_lane_f16(__s0_448, __p1_448); \
+  __ret_448; \
 })
 #else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
+#define vdupq_lane_f16(__p0_449, __p1_449) __extension__ ({ \
+  float16x4_t __s0_449 = __p0_449; \
+  float16x4_t __rev0_449;  __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \
+  float16x8_t __ret_449; \
+  __ret_449 = __noswap_splatq_lane_f16(__rev0_449, __p1_449); \
+  __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_449; \
 })
 #endif
 
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
+#define vdup_lane_f64(__p0_450, __p1_450) __extension__ ({ \
+  float64x1_t __s0_450 = __p0_450; \
+  float64x1_t __ret_450; \
+  __ret_450 = splat_lane_f64(__s0_450, __p1_450); \
+  __ret_450; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
+#define vdup_lane_f16(__p0_451, __p1_451) __extension__ ({ \
+  float16x4_t __s0_451 = __p0_451; \
+  float16x4_t __ret_451; \
+  __ret_451 = splat_lane_f16(__s0_451, __p1_451); \
+  __ret_451; \
 })
 #else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
+#define vdup_lane_f16(__p0_452, __p1_452) __extension__ ({ \
+  float16x4_t __s0_452 = __p0_452; \
+  float16x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
+  float16x4_t __ret_452; \
+  __ret_452 = __noswap_splat_lane_f16(__rev0_452, __p1_452); \
+  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
+  __ret_452; \
 })
 #endif
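The vdup_lane/vdupq_lane macros above broadcast one lane of a 64-bit vector, lowering to the splat_lane_*/splatq_lane_* helpers; single-lane types such as poly64x1_t and float64x1_t need no lane reversal, so only the multi-lane variants get a big-endian twin. A hedged usage sketch (illustrative name):

#include <arm_neon.h>

/* Broadcast the single lane of a float64x1_t across both lanes of a
 * float64x2_t; the lane index must be the constant 0 here. */
static inline float64x2_t broadcast_f64(float64x1_t v) {
  return vdupq_lane_f64(v, 0);
}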
 
@@ -51912,505 +52100,505 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
+#define vdup_laneq_p8(__p0_453, __p1_453) __extension__ ({ \
+  poly8x16_t __s0_453 = __p0_453; \
+  poly8x8_t __ret_453; \
+  __ret_453 = splat_laneq_p8(__s0_453, __p1_453); \
   __ret_453; \
 })
 #else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
+#define vdup_laneq_p8(__p0_454, __p1_454) __extension__ ({ \
+  poly8x16_t __s0_454 = __p0_454; \
+  poly8x16_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_454; \
+  __ret_454 = __noswap_splat_laneq_p8(__rev0_454, __p1_454); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_454; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
+#define vdup_laneq_p64(__p0_455, __p1_455) __extension__ ({ \
+  poly64x2_t __s0_455 = __p0_455; \
+  poly64x1_t __ret_455; \
+  __ret_455 = splat_laneq_p64(__s0_455, __p1_455); \
   __ret_455; \
 })
 #else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_laneq_p64(__p0_456, __p1_456) __extension__ ({ \
+  poly64x2_t __s0_456 = __p0_456; \
+  poly64x2_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
+  poly64x1_t __ret_456; \
+  __ret_456 = __noswap_splat_laneq_p64(__rev0_456, __p1_456); \
   __ret_456; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
+#define vdup_laneq_p16(__p0_457, __p1_457) __extension__ ({ \
+  poly16x8_t __s0_457 = __p0_457; \
+  poly16x4_t __ret_457; \
+  __ret_457 = splat_laneq_p16(__s0_457, __p1_457); \
   __ret_457; \
 })
 #else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_laneq_p16(__p0_458, __p1_458) __extension__ ({ \
+  poly16x8_t __s0_458 = __p0_458; \
+  poly16x8_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_458; \
+  __ret_458 = __noswap_splat_laneq_p16(__rev0_458, __p1_458); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
   __ret_458; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
+#define vdupq_laneq_p8(__p0_459, __p1_459) __extension__ ({ \
+  poly8x16_t __s0_459 = __p0_459; \
+  poly8x16_t __ret_459; \
+  __ret_459 = splatq_laneq_p8(__s0_459, __p1_459); \
   __ret_459; \
 })
 #else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
+#define vdupq_laneq_p8(__p0_460, __p1_460) __extension__ ({ \
+  poly8x16_t __s0_460 = __p0_460; \
+  poly8x16_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_460; \
+  __ret_460 = __noswap_splatq_laneq_p8(__rev0_460, __p1_460); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_460; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
+#define vdupq_laneq_p64(__p0_461, __p1_461) __extension__ ({ \
+  poly64x2_t __s0_461 = __p0_461; \
+  poly64x2_t __ret_461; \
+  __ret_461 = splatq_laneq_p64(__s0_461, __p1_461); \
   __ret_461; \
 })
 #else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
+#define vdupq_laneq_p64(__p0_462, __p1_462) __extension__ ({ \
+  poly64x2_t __s0_462 = __p0_462; \
+  poly64x2_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 1, 0); \
+  poly64x2_t __ret_462; \
+  __ret_462 = __noswap_splatq_laneq_p64(__rev0_462, __p1_462); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 1, 0); \
   __ret_462; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
+#define vdupq_laneq_p16(__p0_463, __p1_463) __extension__ ({ \
+  poly16x8_t __s0_463 = __p0_463; \
+  poly16x8_t __ret_463; \
+  __ret_463 = splatq_laneq_p16(__s0_463, __p1_463); \
   __ret_463; \
 })
 #else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
+#define vdupq_laneq_p16(__p0_464, __p1_464) __extension__ ({ \
+  poly16x8_t __s0_464 = __p0_464; \
+  poly16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_464; \
+  __ret_464 = __noswap_splatq_laneq_p16(__rev0_464, __p1_464); \
   __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_464; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
+#define vdupq_laneq_u8(__p0_465, __p1_465) __extension__ ({ \
+  uint8x16_t __s0_465 = __p0_465; \
+  uint8x16_t __ret_465; \
+  __ret_465 = splatq_laneq_u8(__s0_465, __p1_465); \
   __ret_465; \
 })
 #else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
+#define vdupq_laneq_u8(__p0_466, __p1_466) __extension__ ({ \
+  uint8x16_t __s0_466 = __p0_466; \
+  uint8x16_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_466; \
+  __ret_466 = __noswap_splatq_laneq_u8(__rev0_466, __p1_466); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_466; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
+#define vdupq_laneq_u32(__p0_467, __p1_467) __extension__ ({ \
+  uint32x4_t __s0_467 = __p0_467; \
+  uint32x4_t __ret_467; \
+  __ret_467 = splatq_laneq_u32(__s0_467, __p1_467); \
   __ret_467; \
 })
 #else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
+#define vdupq_laneq_u32(__p0_468, __p1_468) __extension__ ({ \
+  uint32x4_t __s0_468 = __p0_468; \
+  uint32x4_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \
+  uint32x4_t __ret_468; \
+  __ret_468 = __noswap_splatq_laneq_u32(__rev0_468, __p1_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \
   __ret_468; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
+#define vdupq_laneq_u64(__p0_469, __p1_469) __extension__ ({ \
+  uint64x2_t __s0_469 = __p0_469; \
+  uint64x2_t __ret_469; \
+  __ret_469 = splatq_laneq_u64(__s0_469, __p1_469); \
   __ret_469; \
 })
 #else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdupq_laneq_u64(__p0_470, __p1_470) __extension__ ({ \
+  uint64x2_t __s0_470 = __p0_470; \
+  uint64x2_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 1, 0); \
+  uint64x2_t __ret_470; \
+  __ret_470 = __noswap_splatq_laneq_u64(__rev0_470, __p1_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 1, 0); \
   __ret_470; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
+#define vdupq_laneq_u16(__p0_471, __p1_471) __extension__ ({ \
+  uint16x8_t __s0_471 = __p0_471; \
+  uint16x8_t __ret_471; \
+  __ret_471 = splatq_laneq_u16(__s0_471, __p1_471); \
   __ret_471; \
 })
 #else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
+#define vdupq_laneq_u16(__p0_472, __p1_472) __extension__ ({ \
+  uint16x8_t __s0_472 = __p0_472; \
+  uint16x8_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_472; \
+  __ret_472 = __noswap_splatq_laneq_u16(__rev0_472, __p1_472); \
   __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_472; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
+#define vdupq_laneq_s8(__p0_473, __p1_473) __extension__ ({ \
+  int8x16_t __s0_473 = __p0_473; \
+  int8x16_t __ret_473; \
+  __ret_473 = splatq_laneq_s8(__s0_473, __p1_473); \
   __ret_473; \
 })
 #else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
+#define vdupq_laneq_s8(__p0_474, __p1_474) __extension__ ({ \
+  int8x16_t __s0_474 = __p0_474; \
+  int8x16_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_474; \
+  __ret_474 = __noswap_splatq_laneq_s8(__rev0_474, __p1_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_474; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
+#define vdupq_laneq_f64(__p0_475, __p1_475) __extension__ ({ \
+  float64x2_t __s0_475 = __p0_475; \
+  float64x2_t __ret_475; \
+  __ret_475 = splatq_laneq_f64(__s0_475, __p1_475); \
   __ret_475; \
 })
 #else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
+#define vdupq_laneq_f64(__p0_476, __p1_476) __extension__ ({ \
+  float64x2_t __s0_476 = __p0_476; \
+  float64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
+  float64x2_t __ret_476; \
+  __ret_476 = __noswap_splatq_laneq_f64(__rev0_476, __p1_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 1, 0); \
   __ret_476; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
+#define vdupq_laneq_f32(__p0_477, __p1_477) __extension__ ({ \
+  float32x4_t __s0_477 = __p0_477; \
+  float32x4_t __ret_477; \
+  __ret_477 = splatq_laneq_f32(__s0_477, __p1_477); \
   __ret_477; \
 })
 #else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
+#define vdupq_laneq_f32(__p0_478, __p1_478) __extension__ ({ \
+  float32x4_t __s0_478 = __p0_478; \
+  float32x4_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \
+  float32x4_t __ret_478; \
+  __ret_478 = __noswap_splatq_laneq_f32(__rev0_478, __p1_478); \
   __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
   __ret_478; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
+#define vdupq_laneq_f16(__p0_479, __p1_479) __extension__ ({ \
+  float16x8_t __s0_479 = __p0_479; \
+  float16x8_t __ret_479; \
+  __ret_479 = splatq_laneq_f16(__s0_479, __p1_479); \
   __ret_479; \
 })
 #else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
+#define vdupq_laneq_f16(__p0_480, __p1_480) __extension__ ({ \
+  float16x8_t __s0_480 = __p0_480; \
+  float16x8_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_480; \
+  __ret_480 = __noswap_splatq_laneq_f16(__rev0_480, __p1_480); \
   __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_480; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
+#define vdupq_laneq_s32(__p0_481, __p1_481) __extension__ ({ \
+  int32x4_t __s0_481 = __p0_481; \
+  int32x4_t __ret_481; \
+  __ret_481 = splatq_laneq_s32(__s0_481, __p1_481); \
   __ret_481; \
 })
 #else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
+#define vdupq_laneq_s32(__p0_482, __p1_482) __extension__ ({ \
+  int32x4_t __s0_482 = __p0_482; \
+  int32x4_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 3, 2, 1, 0); \
+  int32x4_t __ret_482; \
+  __ret_482 = __noswap_splatq_laneq_s32(__rev0_482, __p1_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 3, 2, 1, 0); \
   __ret_482; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
+#define vdupq_laneq_s64(__p0_483, __p1_483) __extension__ ({ \
+  int64x2_t __s0_483 = __p0_483; \
+  int64x2_t __ret_483; \
+  __ret_483 = splatq_laneq_s64(__s0_483, __p1_483); \
   __ret_483; \
 })
 #else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
+#define vdupq_laneq_s64(__p0_484, __p1_484) __extension__ ({ \
+  int64x2_t __s0_484 = __p0_484; \
+  int64x2_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \
+  int64x2_t __ret_484; \
+  __ret_484 = __noswap_splatq_laneq_s64(__rev0_484, __p1_484); \
   __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
   __ret_484; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
+#define vdupq_laneq_s16(__p0_485, __p1_485) __extension__ ({ \
+  int16x8_t __s0_485 = __p0_485; \
+  int16x8_t __ret_485; \
+  __ret_485 = splatq_laneq_s16(__s0_485, __p1_485); \
   __ret_485; \
 })
 #else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
+#define vdupq_laneq_s16(__p0_486, __p1_486) __extension__ ({ \
+  int16x8_t __s0_486 = __p0_486; \
+  int16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_486; \
+  __ret_486 = __noswap_splatq_laneq_s16(__rev0_486, __p1_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_486; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
+#define vdup_laneq_u8(__p0_487, __p1_487) __extension__ ({ \
+  uint8x16_t __s0_487 = __p0_487; \
+  uint8x8_t __ret_487; \
+  __ret_487 = splat_laneq_u8(__s0_487, __p1_487); \
   __ret_487; \
 })
 #else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
+#define vdup_laneq_u8(__p0_488, __p1_488) __extension__ ({ \
+  uint8x16_t __s0_488 = __p0_488; \
+  uint8x16_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_488; \
+  __ret_488 = __noswap_splat_laneq_u8(__rev0_488, __p1_488); \
+  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_488; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
+#define vdup_laneq_u32(__p0_489, __p1_489) __extension__ ({ \
+  uint32x4_t __s0_489 = __p0_489; \
+  uint32x2_t __ret_489; \
+  __ret_489 = splat_laneq_u32(__s0_489, __p1_489); \
   __ret_489; \
 })
 #else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
+#define vdup_laneq_u32(__p0_490, __p1_490) __extension__ ({ \
+  uint32x4_t __s0_490 = __p0_490; \
+  uint32x4_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
+  uint32x2_t __ret_490; \
+  __ret_490 = __noswap_splat_laneq_u32(__rev0_490, __p1_490); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 1, 0); \
   __ret_490; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
+#define vdup_laneq_u64(__p0_491, __p1_491) __extension__ ({ \
+  uint64x2_t __s0_491 = __p0_491; \
+  uint64x1_t __ret_491; \
+  __ret_491 = splat_laneq_u64(__s0_491, __p1_491); \
   __ret_491; \
 })
 #else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
+#define vdup_laneq_u64(__p0_492, __p1_492) __extension__ ({ \
+  uint64x2_t __s0_492 = __p0_492; \
+  uint64x2_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
+  uint64x1_t __ret_492; \
+  __ret_492 = __noswap_splat_laneq_u64(__rev0_492, __p1_492); \
   __ret_492; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u16(__p0_493, __p1_493) __extension__ ({ \
+  uint16x8_t __s0_493 = __p0_493; \
+  uint16x4_t __ret_493; \
+  __ret_493 = splat_laneq_u16(__s0_493, __p1_493); \
+  __ret_493; \
+})
+#else
+#define vdup_laneq_u16(__p0_494, __p1_494) __extension__ ({ \
+  uint16x8_t __s0_494 = __p0_494; \
+  uint16x8_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_494; \
+  __ret_494 = __noswap_splat_laneq_u16(__rev0_494, __p1_494); \
+  __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
+  __ret_494; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s8(__p0_495, __p1_495) __extension__ ({ \
+  int8x16_t __s0_495 = __p0_495; \
+  int8x8_t __ret_495; \
+  __ret_495 = splat_laneq_s8(__s0_495, __p1_495); \
+  __ret_495; \
+})
+#else
+#define vdup_laneq_s8(__p0_496, __p1_496) __extension__ ({ \
+  int8x16_t __s0_496 = __p0_496; \
+  int8x16_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_496; \
+  __ret_496 = __noswap_splat_laneq_s8(__rev0_496, __p1_496); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_496; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f64(__p0_497, __p1_497) __extension__ ({ \
+  float64x2_t __s0_497 = __p0_497; \
+  float64x1_t __ret_497; \
+  __ret_497 = splat_laneq_f64(__s0_497, __p1_497); \
+  __ret_497; \
+})
+#else
+#define vdup_laneq_f64(__p0_498, __p1_498) __extension__ ({ \
+  float64x2_t __s0_498 = __p0_498; \
+  float64x2_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 1, 0); \
+  float64x1_t __ret_498; \
+  __ret_498 = __noswap_splat_laneq_f64(__rev0_498, __p1_498); \
+  __ret_498; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f32(__p0_499, __p1_499) __extension__ ({ \
+  float32x4_t __s0_499 = __p0_499; \
+  float32x2_t __ret_499; \
+  __ret_499 = splat_laneq_f32(__s0_499, __p1_499); \
+  __ret_499; \
+})
+#else
+#define vdup_laneq_f32(__p0_500, __p1_500) __extension__ ({ \
+  float32x4_t __s0_500 = __p0_500; \
+  float32x4_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \
+  float32x2_t __ret_500; \
+  __ret_500 = __noswap_splat_laneq_f32(__rev0_500, __p1_500); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f16(__p0_501, __p1_501) __extension__ ({ \
+  float16x8_t __s0_501 = __p0_501; \
+  float16x4_t __ret_501; \
+  __ret_501 = splat_laneq_f16(__s0_501, __p1_501); \
+  __ret_501; \
+})
+#else
+#define vdup_laneq_f16(__p0_502, __p1_502) __extension__ ({ \
+  float16x8_t __s0_502 = __p0_502; \
+  float16x8_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_502; \
+  __ret_502 = __noswap_splat_laneq_f16(__rev0_502, __p1_502); \
+  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
+  __ret_502; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s32(__p0_503, __p1_503) __extension__ ({ \
+  int32x4_t __s0_503 = __p0_503; \
+  int32x2_t __ret_503; \
+  __ret_503 = splat_laneq_s32(__s0_503, __p1_503); \
+  __ret_503; \
+})
+#else
+#define vdup_laneq_s32(__p0_504, __p1_504) __extension__ ({ \
+  int32x4_t __s0_504 = __p0_504; \
+  int32x4_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 3, 2, 1, 0); \
+  int32x2_t __ret_504; \
+  __ret_504 = __noswap_splat_laneq_s32(__rev0_504, __p1_504); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
+  __ret_504; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s64(__p0_505, __p1_505) __extension__ ({ \
+  int64x2_t __s0_505 = __p0_505; \
+  int64x1_t __ret_505; \
+  __ret_505 = splat_laneq_s64(__s0_505, __p1_505); \
+  __ret_505; \
+})
+#else
+#define vdup_laneq_s64(__p0_506, __p1_506) __extension__ ({ \
+  int64x2_t __s0_506 = __p0_506; \
+  int64x2_t __rev0_506;  __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 1, 0); \
+  int64x1_t __ret_506; \
+  __ret_506 = __noswap_splat_laneq_s64(__rev0_506, __p1_506); \
+  __ret_506; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s16(__p0_507, __p1_507) __extension__ ({ \
+  int16x8_t __s0_507 = __p0_507; \
+  int16x4_t __ret_507; \
+  __ret_507 = splat_laneq_s16(__s0_507, __p1_507); \
+  __ret_507; \
+})
+#else
+#define vdup_laneq_s16(__p0_508, __p1_508) __extension__ ({ \
+  int16x8_t __s0_508 = __p0_508; \
+  int16x8_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_508; \
+  __ret_508 = __noswap_splat_laneq_s16(__rev0_508, __p1_508); \
+  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 3, 2, 1, 0); \
+  __ret_508; \
+})
+#endif
+
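The vdup_laneq_* group above broadcasts a lane taken from a 128-bit source into a 64-bit result, again via the splat_laneq_* helpers. A hedged usage sketch (illustrative name):

#include <arm_neon.h>

/* Broadcast lane 7 of a 128-bit source into all four lanes of a 64-bit
 * result. */
static inline int16x4_t broadcast_from_q_s16(int16x8_t v) {
  return vdup_laneq_s16(v, 7);
}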
 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
   poly64x1_t __ret;
   __ret = (poly64x1_t) {__p0};
@@ -52903,246 +53091,246 @@
   __ret = vfma_f64(__p0, -__p1, __p2);
   return __ret;
 }
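As the vfms_f64 body above shows, the fused multiply-subtract forms are defined by negating the multiplicand and reusing the corresponding vfma* intrinsic, so fms(a, b, c) computes a - b*c per lane; the lane macros that follow apply the same negation trick. A short sketch (illustrative name):

#include <arm_neon.h>

/* Computes a - b*c per lane as a single fused operation. */
static inline float32x2_t fused_ms_f32(float32x2_t a, float32x2_t b,
                                       float32x2_t c) {
  return vfms_f32(a, b, c);
}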
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
+#define vfmsd_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  float64_t __s0_509 = __p0_509; \
+  float64_t __s1_509 = __p1_509; \
+  float64x1_t __s2_509 = __p2_509; \
+  float64_t __ret_509; \
+  __ret_509 = vfmad_lane_f64(__s0_509, -__s1_509, __s2_509, __p3_509); \
   __ret_509; \
 })
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  float32_t __s0_510 = __p0_510; \
+  float32_t __s1_510 = __p1_510; \
+  float32x2_t __s2_510 = __p2_510; \
+  float32_t __ret_510; \
+  __ret_510 = vfmas_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \
   __ret_510; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
+#else
+#define vfmss_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
+  float32_t __s0_511 = __p0_511; \
+  float32_t __s1_511 = __p1_511; \
+  float32x2_t __s2_511 = __p2_511; \
+  float32x2_t __rev2_511;  __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \
+  float32_t __ret_511; \
+  __ret_511 = __noswap_vfmas_lane_f32(__s0_511, -__s1_511, __rev2_511, __p3_511); \
   __ret_511; \
 })
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
+  float64x2_t __s0_512 = __p0_512; \
+  float64x2_t __s1_512 = __p1_512; \
+  float64x1_t __s2_512 = __p2_512; \
+  float64x2_t __ret_512; \
+  __ret_512 = vfmaq_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \
   __ret_512; \
 })
+#else
+#define vfmsq_lane_f64(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
+  float64x2_t __s0_513 = __p0_513; \
+  float64x2_t __s1_513 = __p1_513; \
+  float64x1_t __s2_513 = __p2_513; \
+  float64x2_t __rev0_513;  __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \
+  float64x2_t __rev1_513;  __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 1, 0); \
+  float64x2_t __ret_513; \
+  __ret_513 = __noswap_vfmaq_lane_f64(__rev0_513, -__rev1_513, __s2_513, __p3_513); \
+  __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \
+  __ret_513; \
+})
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
+#define vfmsq_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
+  float32x4_t __s0_514 = __p0_514; \
+  float32x4_t __s1_514 = __p1_514; \
+  float32x2_t __s2_514 = __p2_514; \
+  float32x4_t __ret_514; \
+  __ret_514 = vfmaq_lane_f32(__s0_514, -__s1_514, __s2_514, __p3_514); \
+  __ret_514; \
 })
 #else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
+#define vfmsq_lane_f32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
+  float32x4_t __s0_515 = __p0_515; \
+  float32x4_t __s1_515 = __p1_515; \
+  float32x2_t __s2_515 = __p2_515; \
+  float32x4_t __rev0_515;  __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \
+  float32x4_t __rev1_515;  __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 3, 2, 1, 0); \
+  float32x2_t __rev2_515;  __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 1, 0); \
+  float32x4_t __ret_515; \
+  __ret_515 = __noswap_vfmaq_lane_f32(__rev0_515, -__rev1_515, __rev2_515, __p3_515); \
+  __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \
+  __ret_515; \
+})
+#endif
+
+#define vfms_lane_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
+  float64x1_t __s0_516 = __p0_516; \
+  float64x1_t __s1_516 = __p1_516; \
+  float64x1_t __s2_516 = __p2_516; \
+  float64x1_t __ret_516; \
+  __ret_516 = vfma_lane_f64(__s0_516, -__s1_516, __s2_516, __p3_516); \
+  __ret_516; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
+  float32x2_t __s0_517 = __p0_517; \
+  float32x2_t __s1_517 = __p1_517; \
+  float32x2_t __s2_517 = __p2_517; \
+  float32x2_t __ret_517; \
+  __ret_517 = vfma_lane_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \
+  __ret_517; \
+})
+#else
+#define vfms_lane_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
+  float32x2_t __s0_518 = __p0_518; \
+  float32x2_t __s1_518 = __p1_518; \
+  float32x2_t __s2_518 = __p2_518; \
+  float32x2_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 1, 0); \
+  float32x2_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 1, 0); \
+  float32x2_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 1, 0); \
+  float32x2_t __ret_518; \
+  __ret_518 = __noswap_vfma_lane_f32(__rev0_518, -__rev1_518, __rev2_518, __p3_518); \
+  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 1, 0); \
+  __ret_518; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
+  float64_t __s0_519 = __p0_519; \
+  float64_t __s1_519 = __p1_519; \
+  float64x2_t __s2_519 = __p2_519; \
+  float64_t __ret_519; \
+  __ret_519 = vfmad_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \
+  __ret_519; \
+})
+#else
+#define vfmsd_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
+  float64_t __s0_520 = __p0_520; \
+  float64_t __s1_520 = __p1_520; \
+  float64x2_t __s2_520 = __p2_520; \
+  float64x2_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \
+  float64_t __ret_520; \
+  __ret_520 = __noswap_vfmad_laneq_f64(__s0_520, -__s1_520, __rev2_520, __p3_520); \
+  __ret_520; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
+  float32_t __s0_521 = __p0_521; \
+  float32_t __s1_521 = __p1_521; \
+  float32x4_t __s2_521 = __p2_521; \
+  float32_t __ret_521; \
+  __ret_521 = vfmas_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \
+  __ret_521; \
+})
+#else
+#define vfmss_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
+  float32_t __s0_522 = __p0_522; \
+  float32_t __s1_522 = __p1_522; \
+  float32x4_t __s2_522 = __p2_522; \
+  float32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
+  float32_t __ret_522; \
+  __ret_522 = __noswap_vfmas_laneq_f32(__s0_522, -__s1_522, __rev2_522, __p3_522); \
+  __ret_522; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
+  float64x2_t __s0_523 = __p0_523; \
+  float64x2_t __s1_523 = __p1_523; \
+  float64x2_t __s2_523 = __p2_523; \
+  float64x2_t __ret_523; \
+  __ret_523 = vfmaq_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \
+  __ret_523; \
+})
+#else
+#define vfmsq_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
+  float64x2_t __s0_524 = __p0_524; \
+  float64x2_t __s1_524 = __p1_524; \
+  float64x2_t __s2_524 = __p2_524; \
+  float64x2_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 1, 0); \
+  float64x2_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 1, 0); \
+  float64x2_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \
+  float64x2_t __ret_524; \
+  __ret_524 = __noswap_vfmaq_laneq_f64(__rev0_524, -__rev1_524, __rev2_524, __p3_524); \
+  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0); \
+  __ret_524; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
+  float32x4_t __s0_525 = __p0_525; \
+  float32x4_t __s1_525 = __p1_525; \
+  float32x4_t __s2_525 = __p2_525; \
+  float32x4_t __ret_525; \
+  __ret_525 = vfmaq_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \
+  __ret_525; \
+})
+#else
+#define vfmsq_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
+  float32x4_t __s0_526 = __p0_526; \
+  float32x4_t __s1_526 = __p1_526; \
+  float32x4_t __s2_526 = __p2_526; \
+  float32x4_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 3, 2, 1, 0); \
+  float32x4_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 3, 2, 1, 0); \
+  float32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
+  float32x4_t __ret_526; \
+  __ret_526 = __noswap_vfmaq_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \
+  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 3, 2, 1, 0); \
+  __ret_526; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f64(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
+  float64x1_t __s0_527 = __p0_527; \
+  float64x1_t __s1_527 = __p1_527; \
+  float64x2_t __s2_527 = __p2_527; \
+  float64x1_t __ret_527; \
+  __ret_527 = vfma_laneq_f64(__s0_527, -__s1_527, __s2_527, __p3_527); \
+  __ret_527; \
+})
+#else
+#define vfms_laneq_f64(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
+  float64x1_t __s0_528 = __p0_528; \
+  float64x1_t __s1_528 = __p1_528; \
+  float64x2_t __s2_528 = __p2_528; \
+  float64x2_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 1, 0); \
+  float64x1_t __ret_528; \
+  __ret_528 = __noswap_vfma_laneq_f64(__s0_528, -__s1_528, __rev2_528, __p3_528); \
+  __ret_528; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
+  float32x2_t __s0_529 = __p0_529; \
+  float32x2_t __s1_529 = __p1_529; \
+  float32x4_t __s2_529 = __p2_529; \
+  float32x2_t __ret_529; \
+  __ret_529 = vfma_laneq_f32(__s0_529, -__s1_529, __s2_529, __p3_529); \
+  __ret_529; \
+})
+#else
+#define vfms_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
+  float32x2_t __s0_530 = __p0_530; \
+  float32x2_t __s1_530 = __p1_530; \
+  float32x4_t __s2_530 = __p2_530; \
+  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
+  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
+  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
+  float32x2_t __ret_530; \
+  __ret_530 = __noswap_vfma_laneq_f32(__rev0_530, -__rev1_530, __rev2_530, __p3_530); \
+  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
+  __ret_530; \
 })
 #endif
 
@@ -55164,534 +55352,534 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
+#define vmlaq_laneq_u32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
+  uint32x4_t __s0_531 = __p0_531; \
+  uint32x4_t __s1_531 = __p1_531; \
+  uint32x4_t __s2_531 = __p2_531; \
+  uint32x4_t __ret_531; \
+  __ret_531 = __s0_531 + __s1_531 * splatq_laneq_u32(__s2_531, __p3_531); \
   __ret_531; \
 })
 #else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
+#define vmlaq_laneq_u32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
+  uint32x4_t __s0_532 = __p0_532; \
+  uint32x4_t __s1_532 = __p1_532; \
+  uint32x4_t __s2_532 = __p2_532; \
+  uint32x4_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \
+  uint32x4_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \
+  uint32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
+  uint32x4_t __ret_532; \
+  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_u32(__rev2_532, __p3_532); \
+  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \
   __ret_532; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
+#define vmlaq_laneq_u16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
+  uint16x8_t __s0_533 = __p0_533; \
+  uint16x8_t __s1_533 = __p1_533; \
+  uint16x8_t __s2_533 = __p2_533; \
+  uint16x8_t __ret_533; \
+  __ret_533 = __s0_533 + __s1_533 * splatq_laneq_u16(__s2_533, __p3_533); \
   __ret_533; \
 })
 #else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
+#define vmlaq_laneq_u16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
+  uint16x8_t __s0_534 = __p0_534; \
+  uint16x8_t __s1_534 = __p1_534; \
+  uint16x8_t __s2_534 = __p2_534; \
+  uint16x8_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_534; \
+  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_u16(__rev2_534, __p3_534); \
+  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_534; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
+#define vmlaq_laneq_f32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
+  float32x4_t __s0_535 = __p0_535; \
+  float32x4_t __s1_535 = __p1_535; \
+  float32x4_t __s2_535 = __p2_535; \
+  float32x4_t __ret_535; \
+  __ret_535 = __s0_535 + __s1_535 * splatq_laneq_f32(__s2_535, __p3_535); \
   __ret_535; \
 })
 #else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
+#define vmlaq_laneq_f32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
+  float32x4_t __s0_536 = __p0_536; \
+  float32x4_t __s1_536 = __p1_536; \
+  float32x4_t __s2_536 = __p2_536; \
+  float32x4_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 3, 2, 1, 0); \
+  float32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
+  float32x4_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 3, 2, 1, 0); \
+  float32x4_t __ret_536; \
+  __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_f32(__rev2_536, __p3_536); \
+  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 3, 2, 1, 0); \
   __ret_536; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
+#define vmlaq_laneq_s32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
+  int32x4_t __s0_537 = __p0_537; \
+  int32x4_t __s1_537 = __p1_537; \
+  int32x4_t __s2_537 = __p2_537; \
+  int32x4_t __ret_537; \
+  __ret_537 = __s0_537 + __s1_537 * splatq_laneq_s32(__s2_537, __p3_537); \
   __ret_537; \
 })
 #else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
+#define vmlaq_laneq_s32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
+  int32x4_t __s0_538 = __p0_538; \
+  int32x4_t __s1_538 = __p1_538; \
+  int32x4_t __s2_538 = __p2_538; \
+  int32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
+  int32x4_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 3, 2, 1, 0); \
+  int32x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
+  int32x4_t __ret_538; \
+  __ret_538 = __rev0_538 + __rev1_538 * __noswap_splatq_laneq_s32(__rev2_538, __p3_538); \
   __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
   __ret_538; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
+#define vmlaq_laneq_s16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
+  int16x8_t __s0_539 = __p0_539; \
+  int16x8_t __s1_539 = __p1_539; \
+  int16x8_t __s2_539 = __p2_539; \
+  int16x8_t __ret_539; \
+  __ret_539 = __s0_539 + __s1_539 * splatq_laneq_s16(__s2_539, __p3_539); \
   __ret_539; \
 })
 #else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
+#define vmlaq_laneq_s16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
+  int16x8_t __s0_540 = __p0_540; \
+  int16x8_t __s1_540 = __p1_540; \
+  int16x8_t __s2_540 = __p2_540; \
+  int16x8_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_540; \
+  __ret_540 = __rev0_540 + __rev1_540 * __noswap_splatq_laneq_s16(__rev2_540, __p3_540); \
+  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_540; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
+#define vmla_laneq_u32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
+  uint32x2_t __s0_541 = __p0_541; \
+  uint32x2_t __s1_541 = __p1_541; \
+  uint32x4_t __s2_541 = __p2_541; \
+  uint32x2_t __ret_541; \
+  __ret_541 = __s0_541 + __s1_541 * splat_laneq_u32(__s2_541, __p3_541); \
   __ret_541; \
 })
 #else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
+#define vmla_laneq_u32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
+  uint32x2_t __s0_542 = __p0_542; \
+  uint32x2_t __s1_542 = __p1_542; \
+  uint32x4_t __s2_542 = __p2_542; \
+  uint32x2_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \
+  uint32x2_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \
+  uint32x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
+  uint32x2_t __ret_542; \
+  __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_u32(__rev2_542, __p3_542); \
+  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \
   __ret_542; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
+#define vmla_laneq_u16(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
+  uint16x4_t __s0_543 = __p0_543; \
+  uint16x4_t __s1_543 = __p1_543; \
+  uint16x8_t __s2_543 = __p2_543; \
+  uint16x4_t __ret_543; \
+  __ret_543 = __s0_543 + __s1_543 * splat_laneq_u16(__s2_543, __p3_543); \
   __ret_543; \
 })
 #else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
+#define vmla_laneq_u16(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
+  uint16x4_t __s0_544 = __p0_544; \
+  uint16x4_t __s1_544 = __p1_544; \
+  uint16x8_t __s2_544 = __p2_544; \
+  uint16x4_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 3, 2, 1, 0); \
+  uint16x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
+  uint16x8_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_544; \
+  __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_u16(__rev2_544, __p3_544); \
+  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 3, 2, 1, 0); \
   __ret_544; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
+#define vmla_laneq_f32(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
+  float32x2_t __s0_545 = __p0_545; \
+  float32x2_t __s1_545 = __p1_545; \
+  float32x4_t __s2_545 = __p2_545; \
+  float32x2_t __ret_545; \
+  __ret_545 = __s0_545 + __s1_545 * splat_laneq_f32(__s2_545, __p3_545); \
   __ret_545; \
 })
 #else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
+#define vmla_laneq_f32(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
+  float32x2_t __s0_546 = __p0_546; \
+  float32x2_t __s1_546 = __p1_546; \
+  float32x4_t __s2_546 = __p2_546; \
+  float32x2_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 1, 0); \
+  float32x2_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 1, 0); \
+  float32x4_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 3, 2, 1, 0); \
+  float32x2_t __ret_546; \
+  __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_f32(__rev2_546, __p3_546); \
+  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 1, 0); \
   __ret_546; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
+#define vmla_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
+  int32x2_t __s0_547 = __p0_547; \
+  int32x2_t __s1_547 = __p1_547; \
   int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
+  int32x2_t __ret_547; \
+  __ret_547 = __s0_547 + __s1_547 * splat_laneq_s32(__s2_547, __p3_547); \
   __ret_547; \
 })
 #else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
+#define vmla_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
+  int32x2_t __s0_548 = __p0_548; \
+  int32x2_t __s1_548 = __p1_548; \
   int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
+  int32x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
+  int32x2_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 1, 0); \
   int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
+  int32x2_t __ret_548; \
+  __ret_548 = __rev0_548 + __rev1_548 * __noswap_splat_laneq_s32(__rev2_548, __p3_548); \
   __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
   __ret_548; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
+#define vmla_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
+  int16x4_t __s0_549 = __p0_549; \
+  int16x4_t __s1_549 = __p1_549; \
   int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
+  int16x4_t __ret_549; \
+  __ret_549 = __s0_549 + __s1_549 * splat_laneq_s16(__s2_549, __p3_549); \
   __ret_549; \
 })
 #else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
+#define vmla_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
+  int16x4_t __s0_550 = __p0_550; \
+  int16x4_t __s1_550 = __p1_550; \
   int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
+  int16x4_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 3, 2, 1, 0); \
   int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
+  int16x4_t __ret_550; \
+  __ret_550 = __rev0_550 + __rev1_550 * __noswap_splat_laneq_s16(__rev2_550, __p3_550); \
   __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
   __ret_550; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
+#define vmlal_high_lane_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
   uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
+  uint32x4_t __s1_551 = __p1_551; \
+  uint32x2_t __s2_551 = __p2_551; \
   uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
+  __ret_551 = __s0_551 + vmull_u32(vget_high_u32(__s1_551), splat_lane_u32(__s2_551, __p3_551)); \
   __ret_551; \
 })
 #else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
+#define vmlal_high_lane_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
   uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
+  uint32x4_t __s1_552 = __p1_552; \
+  uint32x2_t __s2_552 = __p2_552; \
   uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
+  uint32x4_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \
+  uint32x2_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \
   uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
+  __ret_552 = __rev0_552 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_552), __noswap_splat_lane_u32(__rev2_552, __p3_552)); \
   __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
   __ret_552; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
+#define vmlal_high_lane_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
   uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
+  uint16x8_t __s1_553 = __p1_553; \
+  uint16x4_t __s2_553 = __p2_553; \
   uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
+  __ret_553 = __s0_553 + vmull_u16(vget_high_u16(__s1_553), splat_lane_u16(__s2_553, __p3_553)); \
   __ret_553; \
 })
 #else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
+#define vmlal_high_lane_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
   uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
+  uint16x8_t __s1_554 = __p1_554; \
+  uint16x4_t __s2_554 = __p2_554; \
   uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \
   uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
+  __ret_554 = __rev0_554 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_554), __noswap_splat_lane_u16(__rev2_554, __p3_554)); \
   __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
   __ret_554; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
+#define vmlal_high_lane_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
   int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
+  int32x4_t __s1_555 = __p1_555; \
+  int32x2_t __s2_555 = __p2_555; \
   int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
+  __ret_555 = __s0_555 + vmull_s32(vget_high_s32(__s1_555), splat_lane_s32(__s2_555, __p3_555)); \
   __ret_555; \
 })
 #else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
+#define vmlal_high_lane_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
   int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
+  int32x4_t __s1_556 = __p1_556; \
+  int32x2_t __s2_556 = __p2_556; \
   int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
+  int32x4_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \
+  int32x2_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 1, 0); \
   int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
+  __ret_556 = __rev0_556 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_556), __noswap_splat_lane_s32(__rev2_556, __p3_556)); \
   __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
   __ret_556; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
+#define vmlal_high_lane_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
   int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
+  int16x8_t __s1_557 = __p1_557; \
+  int16x4_t __s2_557 = __p2_557; \
   int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
+  __ret_557 = __s0_557 + vmull_s16(vget_high_s16(__s1_557), splat_lane_s16(__s2_557, __p3_557)); \
   __ret_557; \
 })
 #else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
+#define vmlal_high_lane_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
   int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
+  int16x8_t __s1_558 = __p1_558; \
+  int16x4_t __s2_558 = __p2_558; \
   int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 3, 2, 1, 0); \
   int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
+  __ret_558 = __rev0_558 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_558), __noswap_splat_lane_s16(__rev2_558, __p3_558)); \
   __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
   __ret_558; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
+  uint64x2_t __s0_559 = __p0_559; \
+  uint32x4_t __s1_559 = __p1_559; \
+  uint32x4_t __s2_559 = __p2_559; \
+  uint64x2_t __ret_559; \
+  __ret_559 = __s0_559 + vmull_u32(vget_high_u32(__s1_559), splat_laneq_u32(__s2_559, __p3_559)); \
+  __ret_559; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
+  uint64x2_t __s0_560 = __p0_560; \
+  uint32x4_t __s1_560 = __p1_560; \
+  uint32x4_t __s2_560 = __p2_560; \
+  uint64x2_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \
+  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
+  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
+  uint64x2_t __ret_560; \
+  __ret_560 = __rev0_560 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_560), __noswap_splat_laneq_u32(__rev2_560, __p3_560)); \
+  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \
+  __ret_560; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
+  uint32x4_t __s0_561 = __p0_561; \
+  uint16x8_t __s1_561 = __p1_561; \
+  uint16x8_t __s2_561 = __p2_561; \
+  uint32x4_t __ret_561; \
+  __ret_561 = __s0_561 + vmull_u16(vget_high_u16(__s1_561), splat_laneq_u16(__s2_561, __p3_561)); \
+  __ret_561; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
+  uint32x4_t __s0_562 = __p0_562; \
+  uint16x8_t __s1_562 = __p1_562; \
+  uint16x8_t __s2_562 = __p2_562; \
+  uint32x4_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \
+  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_562; \
+  __ret_562 = __rev0_562 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_562), __noswap_splat_laneq_u16(__rev2_562, __p3_562)); \
+  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \
+  __ret_562; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
+  int64x2_t __s0_563 = __p0_563; \
+  int32x4_t __s1_563 = __p1_563; \
+  int32x4_t __s2_563 = __p2_563; \
+  int64x2_t __ret_563; \
+  __ret_563 = __s0_563 + vmull_s32(vget_high_s32(__s1_563), splat_laneq_s32(__s2_563, __p3_563)); \
+  __ret_563; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
+  int64x2_t __s0_564 = __p0_564; \
+  int32x4_t __s1_564 = __p1_564; \
+  int32x4_t __s2_564 = __p2_564; \
+  int64x2_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \
+  int32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
+  int32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
+  int64x2_t __ret_564; \
+  __ret_564 = __rev0_564 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_564), __noswap_splat_laneq_s32(__rev2_564, __p3_564)); \
+  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \
+  __ret_564; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
+  int32x4_t __s0_565 = __p0_565; \
+  int16x8_t __s1_565 = __p1_565; \
+  int16x8_t __s2_565 = __p2_565; \
+  int32x4_t __ret_565; \
+  __ret_565 = __s0_565 + vmull_s16(vget_high_s16(__s1_565), splat_laneq_s16(__s2_565, __p3_565)); \
+  __ret_565; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
+  int32x4_t __s0_566 = __p0_566; \
+  int16x8_t __s1_566 = __p1_566; \
+  int16x8_t __s2_566 = __p2_566; \
+  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
+  int16x8_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_566; \
+  __ret_566 = __rev0_566 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_566), __noswap_splat_laneq_s16(__rev2_566, __p3_566)); \
+  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
+  __ret_566; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
+  uint64x2_t __s0_567 = __p0_567; \
+  uint32x2_t __s1_567 = __p1_567; \
+  uint32x4_t __s2_567 = __p2_567; \
+  uint64x2_t __ret_567; \
+  __ret_567 = __s0_567 + vmull_u32(__s1_567, splat_laneq_u32(__s2_567, __p3_567)); \
+  __ret_567; \
+})
+#else
+#define vmlal_laneq_u32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
+  uint64x2_t __s0_568 = __p0_568; \
+  uint32x2_t __s1_568 = __p1_568; \
+  uint32x4_t __s2_568 = __p2_568; \
+  uint64x2_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \
+  uint32x2_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \
+  uint32x4_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \
+  uint64x2_t __ret_568; \
+  __ret_568 = __rev0_568 + __noswap_vmull_u32(__rev1_568, __noswap_splat_laneq_u32(__rev2_568, __p3_568)); \
+  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \
+  __ret_568; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
+  uint32x4_t __s0_569 = __p0_569; \
+  uint16x4_t __s1_569 = __p1_569; \
+  uint16x8_t __s2_569 = __p2_569; \
+  uint32x4_t __ret_569; \
+  __ret_569 = __s0_569 + vmull_u16(__s1_569, splat_laneq_u16(__s2_569, __p3_569)); \
+  __ret_569; \
+})
+#else
+#define vmlal_laneq_u16(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
+  uint32x4_t __s0_570 = __p0_570; \
+  uint16x4_t __s1_570 = __p1_570; \
+  uint16x8_t __s2_570 = __p2_570; \
+  uint32x4_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \
+  uint16x4_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \
+  uint16x8_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_570; \
+  __ret_570 = __rev0_570 + __noswap_vmull_u16(__rev1_570, __noswap_splat_laneq_u16(__rev2_570, __p3_570)); \
+  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \
+  __ret_570; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
+  int64x2_t __s0_571 = __p0_571; \
+  int32x2_t __s1_571 = __p1_571; \
+  int32x4_t __s2_571 = __p2_571; \
+  int64x2_t __ret_571; \
+  __ret_571 = __s0_571 + vmull_s32(__s1_571, splat_laneq_s32(__s2_571, __p3_571)); \
+  __ret_571; \
+})
+#else
+#define vmlal_laneq_s32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
+  int64x2_t __s0_572 = __p0_572; \
+  int32x2_t __s1_572 = __p1_572; \
+  int32x4_t __s2_572 = __p2_572; \
+  int64x2_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 1, 0); \
+  int32x2_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 1, 0); \
+  int32x4_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \
+  int64x2_t __ret_572; \
+  __ret_572 = __rev0_572 + __noswap_vmull_s32(__rev1_572, __noswap_splat_laneq_s32(__rev2_572, __p3_572)); \
+  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 1, 0); \
+  __ret_572; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
+  int32x4_t __s0_573 = __p0_573; \
+  int16x4_t __s1_573 = __p1_573; \
+  int16x8_t __s2_573 = __p2_573; \
+  int32x4_t __ret_573; \
+  __ret_573 = __s0_573 + vmull_s16(__s1_573, splat_laneq_s16(__s2_573, __p3_573)); \
+  __ret_573; \
+})
+#else
+#define vmlal_laneq_s16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
+  int32x4_t __s0_574 = __p0_574; \
+  int16x4_t __s1_574 = __p1_574; \
+  int16x8_t __s2_574 = __p2_574; \
+  int32x4_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 3, 2, 1, 0); \
+  int16x4_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 3, 2, 1, 0); \
+  int16x8_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_574; \
+  __ret_574 = __rev0_574 + __noswap_vmull_s16(__rev1_574, __noswap_splat_laneq_s16(__rev2_574, __p3_574)); \
+  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 3, 2, 1, 0); \
+  __ret_574; \
+})
+#endif
+
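+/*
+ * Editor's note (illustrative only, not part of the upstream header): each
+ * intrinsic above is emitted twice. The __LITTLE_ENDIAN__ variant operates on
+ * the operands directly; the big-endian variant first reverses the lanes of
+ * every operand with __builtin_shufflevector, performs the lane-indexed
+ * operation through the __noswap_* helpers, and reverses the result back.
+ * A minimal usage sketch of one of the macros above, with hypothetical values:
+ *
+ *   int32x4_t acc = vdupq_n_s32(0);   // 32-bit accumulator
+ *   int16x4_t a   = vdup_n_s16(3);    // narrow multiplicand
+ *   int16x8_t b   = vdupq_n_s16(5);   // vector supplying the lane
+ *   // acc + widening multiply of a by lane 7 of b
+ *   int32x4_t res = vmlal_laneq_s16(acc, a, b, 7);
+ */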
+#ifdef __LITTLE_ENDIAN__
 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
   float64x2_t __ret;
   __ret = __p0 - __p1 * __p2;
@@ -55715,533 +55903,533 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
+#define vmlsq_laneq_u32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
+  uint32x4_t __s0_575 = __p0_575; \
+  uint32x4_t __s1_575 = __p1_575; \
+  uint32x4_t __s2_575 = __p2_575; \
+  uint32x4_t __ret_575; \
+  __ret_575 = __s0_575 - __s1_575 * splatq_laneq_u32(__s2_575, __p3_575); \
   __ret_575; \
 })
 #else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
+#define vmlsq_laneq_u32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
+  uint32x4_t __s0_576 = __p0_576; \
+  uint32x4_t __s1_576 = __p1_576; \
+  uint32x4_t __s2_576 = __p2_576; \
+  uint32x4_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \
+  uint32x4_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \
+  uint32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
+  uint32x4_t __ret_576; \
+  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_u32(__rev2_576, __p3_576); \
+  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \
   __ret_576; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
+#define vmlsq_laneq_u16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
+  uint16x8_t __s0_577 = __p0_577; \
+  uint16x8_t __s1_577 = __p1_577; \
+  uint16x8_t __s2_577 = __p2_577; \
+  uint16x8_t __ret_577; \
+  __ret_577 = __s0_577 - __s1_577 * splatq_laneq_u16(__s2_577, __p3_577); \
   __ret_577; \
 })
 #else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
+#define vmlsq_laneq_u16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
+  uint16x8_t __s0_578 = __p0_578; \
+  uint16x8_t __s1_578 = __p1_578; \
+  uint16x8_t __s2_578 = __p2_578; \
+  uint16x8_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_578; \
+  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_u16(__rev2_578, __p3_578); \
+  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_578; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
+#define vmlsq_laneq_f32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
+  float32x4_t __s0_579 = __p0_579; \
+  float32x4_t __s1_579 = __p1_579; \
+  float32x4_t __s2_579 = __p2_579; \
+  float32x4_t __ret_579; \
+  __ret_579 = __s0_579 - __s1_579 * splatq_laneq_f32(__s2_579, __p3_579); \
   __ret_579; \
 })
 #else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
+#define vmlsq_laneq_f32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
+  float32x4_t __s0_580 = __p0_580; \
+  float32x4_t __s1_580 = __p1_580; \
+  float32x4_t __s2_580 = __p2_580; \
+  float32x4_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 3, 2, 1, 0); \
+  float32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
+  float32x4_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 3, 2, 1, 0); \
+  float32x4_t __ret_580; \
+  __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_f32(__rev2_580, __p3_580); \
+  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 3, 2, 1, 0); \
   __ret_580; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
+#define vmlsq_laneq_s32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
+  int32x4_t __s0_581 = __p0_581; \
+  int32x4_t __s1_581 = __p1_581; \
+  int32x4_t __s2_581 = __p2_581; \
+  int32x4_t __ret_581; \
+  __ret_581 = __s0_581 - __s1_581 * splatq_laneq_s32(__s2_581, __p3_581); \
   __ret_581; \
 })
 #else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
+#define vmlsq_laneq_s32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
+  int32x4_t __s0_582 = __p0_582; \
+  int32x4_t __s1_582 = __p1_582; \
+  int32x4_t __s2_582 = __p2_582; \
+  int32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
+  int32x4_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \
+  int32x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
+  int32x4_t __ret_582; \
+  __ret_582 = __rev0_582 - __rev1_582 * __noswap_splatq_laneq_s32(__rev2_582, __p3_582); \
   __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
   __ret_582; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
+#define vmlsq_laneq_s16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
+  int16x8_t __s0_583 = __p0_583; \
+  int16x8_t __s1_583 = __p1_583; \
+  int16x8_t __s2_583 = __p2_583; \
+  int16x8_t __ret_583; \
+  __ret_583 = __s0_583 - __s1_583 * splatq_laneq_s16(__s2_583, __p3_583); \
   __ret_583; \
 })
 #else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
+#define vmlsq_laneq_s16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
+  int16x8_t __s0_584 = __p0_584; \
+  int16x8_t __s1_584 = __p1_584; \
+  int16x8_t __s2_584 = __p2_584; \
+  int16x8_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_584; \
+  __ret_584 = __rev0_584 - __rev1_584 * __noswap_splatq_laneq_s16(__rev2_584, __p3_584); \
+  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_584; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
+#define vmls_laneq_u32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
+  uint32x2_t __s0_585 = __p0_585; \
+  uint32x2_t __s1_585 = __p1_585; \
+  uint32x4_t __s2_585 = __p2_585; \
+  uint32x2_t __ret_585; \
+  __ret_585 = __s0_585 - __s1_585 * splat_laneq_u32(__s2_585, __p3_585); \
   __ret_585; \
 })
 #else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
+#define vmls_laneq_u32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
+  uint32x2_t __s0_586 = __p0_586; \
+  uint32x2_t __s1_586 = __p1_586; \
+  uint32x4_t __s2_586 = __p2_586; \
+  uint32x2_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \
+  uint32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  uint32x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
+  uint32x2_t __ret_586; \
+  __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_u32(__rev2_586, __p3_586); \
+  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \
   __ret_586; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
+#define vmls_laneq_u16(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
+  uint16x4_t __s0_587 = __p0_587; \
+  uint16x4_t __s1_587 = __p1_587; \
+  uint16x8_t __s2_587 = __p2_587; \
+  uint16x4_t __ret_587; \
+  __ret_587 = __s0_587 - __s1_587 * splat_laneq_u16(__s2_587, __p3_587); \
   __ret_587; \
 })
 #else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+#define vmls_laneq_u16(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
+  uint16x4_t __s0_588 = __p0_588; \
+  uint16x4_t __s1_588 = __p1_588; \
+  uint16x8_t __s2_588 = __p2_588; \
+  uint16x4_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 3, 2, 1, 0); \
+  uint16x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
+  uint16x8_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_588; \
+  __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_u16(__rev2_588, __p3_588); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \
   __ret_588; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
+#define vmls_laneq_f32(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
+  float32x2_t __s0_589 = __p0_589; \
+  float32x2_t __s1_589 = __p1_589; \
+  float32x4_t __s2_589 = __p2_589; \
+  float32x2_t __ret_589; \
+  __ret_589 = __s0_589 - __s1_589 * splat_laneq_f32(__s2_589, __p3_589); \
   __ret_589; \
 })
 #else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+#define vmls_laneq_f32(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
+  float32x2_t __s0_590 = __p0_590; \
+  float32x2_t __s1_590 = __p1_590; \
+  float32x4_t __s2_590 = __p2_590; \
+  float32x2_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 1, 0); \
+  float32x2_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \
+  float32x4_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 3, 2, 1, 0); \
+  float32x2_t __ret_590; \
+  __ret_590 = __rev0_590 - __rev1_590 * __noswap_splat_laneq_f32(__rev2_590, __p3_590); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \
   __ret_590; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
+#define vmls_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
+  int32x2_t __s0_591 = __p0_591; \
+  int32x2_t __s1_591 = __p1_591; \
   int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
+  int32x2_t __ret_591; \
+  __ret_591 = __s0_591 - __s1_591 * splat_laneq_s32(__s2_591, __p3_591); \
   __ret_591; \
 })
 #else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
+#define vmls_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
+  int32x2_t __s0_592 = __p0_592; \
+  int32x2_t __s1_592 = __p1_592; \
   int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
+  int32x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  int32x2_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \
   int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
+  int32x2_t __ret_592; \
+  __ret_592 = __rev0_592 - __rev1_592 * __noswap_splat_laneq_s32(__rev2_592, __p3_592); \
   __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
   __ret_592; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
+#define vmls_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
+  int16x4_t __s0_593 = __p0_593; \
+  int16x4_t __s1_593 = __p1_593; \
   int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
+  int16x4_t __ret_593; \
+  __ret_593 = __s0_593 - __s1_593 * splat_laneq_s16(__s2_593, __p3_593); \
   __ret_593; \
 })
 #else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
+#define vmls_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
+  int16x4_t __s0_594 = __p0_594; \
+  int16x4_t __s1_594 = __p1_594; \
   int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
+  int16x4_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \
   int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
+  int16x4_t __ret_594; \
+  __ret_594 = __rev0_594 - __rev1_594 * __noswap_splat_laneq_s16(__rev2_594, __p3_594); \
   __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
   __ret_594; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
+#define vmlsl_high_lane_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
   uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
+  uint32x4_t __s1_595 = __p1_595; \
+  uint32x2_t __s2_595 = __p2_595; \
   uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
+  __ret_595 = __s0_595 - vmull_u32(vget_high_u32(__s1_595), splat_lane_u32(__s2_595, __p3_595)); \
   __ret_595; \
 })
 #else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
+#define vmlsl_high_lane_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
   uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
+  uint32x4_t __s1_596 = __p1_596; \
+  uint32x2_t __s2_596 = __p2_596; \
   uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
+  uint32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  uint32x2_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \
   uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
+  __ret_596 = __rev0_596 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_596), __noswap_splat_lane_u32(__rev2_596, __p3_596)); \
   __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
   __ret_596; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
+#define vmlsl_high_lane_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
   uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
+  uint16x8_t __s1_597 = __p1_597; \
+  uint16x4_t __s2_597 = __p2_597; \
   uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
+  __ret_597 = __s0_597 - vmull_u16(vget_high_u16(__s1_597), splat_lane_u16(__s2_597, __p3_597)); \
   __ret_597; \
 })
 #else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
+#define vmlsl_high_lane_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
   uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
+  uint16x8_t __s1_598 = __p1_598; \
+  uint16x4_t __s2_598 = __p2_598; \
   uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \
   uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
+  __ret_598 = __rev0_598 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_598), __noswap_splat_lane_u16(__rev2_598, __p3_598)); \
   __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
   __ret_598; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
+#define vmlsl_high_lane_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
   int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
+  int32x4_t __s1_599 = __p1_599; \
+  int32x2_t __s2_599 = __p2_599; \
   int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
+  __ret_599 = __s0_599 - vmull_s32(vget_high_s32(__s1_599), splat_lane_s32(__s2_599, __p3_599)); \
   __ret_599; \
 })
 #else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
+#define vmlsl_high_lane_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
   int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
+  int32x4_t __s1_600 = __p1_600; \
+  int32x2_t __s2_600 = __p2_600; \
   int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
+  int32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  int32x2_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 1, 0); \
   int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
+  __ret_600 = __rev0_600 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_600), __noswap_splat_lane_s32(__rev2_600, __p3_600)); \
   __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
   __ret_600; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
+#define vmlsl_high_lane_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
   int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
+  int16x8_t __s1_601 = __p1_601; \
+  int16x4_t __s2_601 = __p2_601; \
   int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
+  __ret_601 = __s0_601 - vmull_s16(vget_high_s16(__s1_601), splat_lane_s16(__s2_601, __p3_601)); \
   __ret_601; \
 })
 #else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
+#define vmlsl_high_lane_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
   int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
+  int16x8_t __s1_602 = __p1_602; \
+  int16x4_t __s2_602 = __p2_602; \
   int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 3, 2, 1, 0); \
   int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
+  __ret_602 = __rev0_602 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_602), __noswap_splat_lane_s16(__rev2_602, __p3_602)); \
   __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
   __ret_602; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  uint64x2_t __s0_603 = __p0_603; \
+  uint32x4_t __s1_603 = __p1_603; \
+  uint32x4_t __s2_603 = __p2_603; \
+  uint64x2_t __ret_603; \
+  __ret_603 = __s0_603 - vmull_u32(vget_high_u32(__s1_603), splat_laneq_u32(__s2_603, __p3_603)); \
+  __ret_603; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  uint64x2_t __s0_604 = __p0_604; \
+  uint32x4_t __s1_604 = __p1_604; \
+  uint32x4_t __s2_604 = __p2_604; \
+  uint64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  uint32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  uint32x4_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \
+  uint64x2_t __ret_604; \
+  __ret_604 = __rev0_604 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_604), __noswap_splat_laneq_u32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  uint32x4_t __s0_605 = __p0_605; \
+  uint16x8_t __s1_605 = __p1_605; \
+  uint16x8_t __s2_605 = __p2_605; \
+  uint32x4_t __ret_605; \
+  __ret_605 = __s0_605 - vmull_u16(vget_high_u16(__s1_605), splat_laneq_u16(__s2_605, __p3_605)); \
+  __ret_605; \
+})
+#else
+#define vmlsl_high_laneq_u16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  uint32x4_t __s0_606 = __p0_606; \
+  uint16x8_t __s1_606 = __p1_606; \
+  uint16x8_t __s2_606 = __p2_606; \
+  uint32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  uint16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_606; \
+  __ret_606 = __rev0_606 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_606), __noswap_splat_laneq_u16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  int64x2_t __s0_607 = __p0_607; \
+  int32x4_t __s1_607 = __p1_607; \
+  int32x4_t __s2_607 = __p2_607; \
+  int64x2_t __ret_607; \
+  __ret_607 = __s0_607 - vmull_s32(vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \
+  __ret_607; \
+})
+#else
+#define vmlsl_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  int64x2_t __s0_608 = __p0_608; \
+  int32x4_t __s1_608 = __p1_608; \
+  int32x4_t __s2_608 = __p2_608; \
+  int64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  int32x4_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \
+  int32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  int64x2_t __ret_608; \
+  __ret_608 = __rev0_608 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  int32x4_t __s0_609 = __p0_609; \
+  int16x8_t __s1_609 = __p1_609; \
+  int16x8_t __s2_609 = __p2_609; \
+  int32x4_t __ret_609; \
+  __ret_609 = __s0_609 - vmull_s16(vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \
+  __ret_609; \
+})
+#else
+#define vmlsl_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  int32x4_t __s0_610 = __p0_610; \
+  int16x8_t __s1_610 = __p1_610; \
+  int16x8_t __s2_610 = __p2_610; \
+  int32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  int16x8_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_610; \
+  __ret_610 = __rev0_610 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  uint64x2_t __s0_611 = __p0_611; \
+  uint32x2_t __s1_611 = __p1_611; \
+  uint32x4_t __s2_611 = __p2_611; \
+  uint64x2_t __ret_611; \
+  __ret_611 = __s0_611 - vmull_u32(__s1_611, splat_laneq_u32(__s2_611, __p3_611)); \
+  __ret_611; \
+})
+#else
+#define vmlsl_laneq_u32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  uint64x2_t __s0_612 = __p0_612; \
+  uint32x2_t __s1_612 = __p1_612; \
+  uint32x4_t __s2_612 = __p2_612; \
+  uint64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  uint32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  uint32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  uint64x2_t __ret_612; \
+  __ret_612 = __rev0_612 - __noswap_vmull_u32(__rev1_612, __noswap_splat_laneq_u32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  uint32x4_t __s0_613 = __p0_613; \
+  uint16x4_t __s1_613 = __p1_613; \
+  uint16x8_t __s2_613 = __p2_613; \
+  uint32x4_t __ret_613; \
+  __ret_613 = __s0_613 - vmull_u16(__s1_613, splat_laneq_u16(__s2_613, __p3_613)); \
+  __ret_613; \
+})
+#else
+#define vmlsl_laneq_u16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  uint32x4_t __s0_614 = __p0_614; \
+  uint16x4_t __s1_614 = __p1_614; \
+  uint16x8_t __s2_614 = __p2_614; \
+  uint32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  uint16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  uint16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_614; \
+  __ret_614 = __rev0_614 - __noswap_vmull_u16(__rev1_614, __noswap_splat_laneq_u16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \
+  int64x2_t __s0_615 = __p0_615; \
+  int32x2_t __s1_615 = __p1_615; \
+  int32x4_t __s2_615 = __p2_615; \
+  int64x2_t __ret_615; \
+  __ret_615 = __s0_615 - vmull_s32(__s1_615, splat_laneq_s32(__s2_615, __p3_615)); \
+  __ret_615; \
+})
+#else
+#define vmlsl_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \
+  int64x2_t __s0_616 = __p0_616; \
+  int32x2_t __s1_616 = __p1_616; \
+  int32x4_t __s2_616 = __p2_616; \
+  int64x2_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \
+  int32x2_t __rev1_616;  __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \
+  int32x4_t __rev2_616;  __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \
+  int64x2_t __ret_616; \
+  __ret_616 = __rev0_616 - __noswap_vmull_s32(__rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \
+  __ret_616; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \
+  int32x4_t __s0_617 = __p0_617; \
+  int16x4_t __s1_617 = __p1_617; \
+  int16x8_t __s2_617 = __p2_617; \
+  int32x4_t __ret_617; \
+  __ret_617 = __s0_617 - vmull_s16(__s1_617, splat_laneq_s16(__s2_617, __p3_617)); \
+  __ret_617; \
+})
+#else
+#define vmlsl_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \
+  int32x4_t __s0_618 = __p0_618; \
+  int16x4_t __s1_618 = __p1_618; \
+  int16x8_t __s2_618 = __p2_618; \
+  int32x4_t __rev0_618;  __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \
+  int16x4_t __rev1_618;  __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \
+  int16x8_t __rev2_618;  __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_618; \
+  __ret_618 = __rev0_618 - __noswap_vmull_s16(__rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \
+  __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \
+  __ret_618; \
+})
+#endif
+
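+/*
+ * Editor's note (illustrative only, not part of the upstream header): the
+ * vmlsl_* forms are multiply-subtract-long. For example, on little-endian
+ * targets vmlsl_high_laneq_u32(acc, v, lanes, n) expands to
+ *   acc - vmull_u32(vget_high_u32(v), splat_laneq_u32(lanes, n))
+ * i.e. the high half of v is widened, multiplied by lane n of lanes, and
+ * subtracted from the 64-bit accumulator.
+ */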
 __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
   poly64x1_t __ret;
   __ret = (poly64x1_t) {__p0};
@@ -56283,147 +56471,147 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_619) {
+  uint16x8_t __ret_619;
+  uint8x8_t __a1_619 = vget_high_u8(__p0_619);
+  __ret_619 = (uint16x8_t)(vshll_n_u8(__a1_619, 0));
   return __ret_619;
 }
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_620) {
+  uint8x16_t __rev0_620;  __rev0_620 = __builtin_shufflevector(__p0_620, __p0_620, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret_620;
+  uint8x8_t __a1_620 = __noswap_vget_high_u8(__rev0_620);
+  __ret_620 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_620, 0));
+  __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret_620;
 }
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_621) {
+  uint16x8_t __ret_621;
+  uint8x8_t __a1_621 = __noswap_vget_high_u8(__p0_621);
+  __ret_621 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_621, 0));
+  return __ret_621;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_622) {
+  uint64x2_t __ret_622;
+  uint32x2_t __a1_622 = vget_high_u32(__p0_622);
+  __ret_622 = (uint64x2_t)(vshll_n_u32(__a1_622, 0));
+  return __ret_622;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_623) {
+  uint32x4_t __rev0_623;  __rev0_623 = __builtin_shufflevector(__p0_623, __p0_623, 3, 2, 1, 0);
+  uint64x2_t __ret_623;
+  uint32x2_t __a1_623 = __noswap_vget_high_u32(__rev0_623);
+  __ret_623 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_623, 0));
+  __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0);
+  return __ret_623;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_624) {
+  uint64x2_t __ret_624;
+  uint32x2_t __a1_624 = __noswap_vget_high_u32(__p0_624);
+  __ret_624 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_624, 0));
+  return __ret_624;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_625) {
+  uint32x4_t __ret_625;
+  uint16x4_t __a1_625 = vget_high_u16(__p0_625);
+  __ret_625 = (uint32x4_t)(vshll_n_u16(__a1_625, 0));
+  return __ret_625;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_626) {
+  uint16x8_t __rev0_626;  __rev0_626 = __builtin_shufflevector(__p0_626, __p0_626, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret_626;
+  uint16x4_t __a1_626 = __noswap_vget_high_u16(__rev0_626);
+  __ret_626 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_626, 0));
+  __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0);
+  return __ret_626;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_627) {
+  uint32x4_t __ret_627;
+  uint16x4_t __a1_627 = __noswap_vget_high_u16(__p0_627);
+  __ret_627 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_627, 0));
+  return __ret_627;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_628) {
+  int16x8_t __ret_628;
+  int8x8_t __a1_628 = vget_high_s8(__p0_628);
+  __ret_628 = (int16x8_t)(vshll_n_s8(__a1_628, 0));
+  return __ret_628;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_629) {
+  int8x16_t __rev0_629;  __rev0_629 = __builtin_shufflevector(__p0_629, __p0_629, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret_629;
+  int8x8_t __a1_629 = __noswap_vget_high_s8(__rev0_629);
+  __ret_629 = (int16x8_t)(__noswap_vshll_n_s8(__a1_629, 0));
+  __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_629;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_630) {
+  int16x8_t __ret_630;
+  int8x8_t __a1_630 = __noswap_vget_high_s8(__p0_630);
+  __ret_630 = (int16x8_t)(__noswap_vshll_n_s8(__a1_630, 0));
+  return __ret_630;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_631) {
+  int64x2_t __ret_631;
+  int32x2_t __a1_631 = vget_high_s32(__p0_631);
+  __ret_631 = (int64x2_t)(vshll_n_s32(__a1_631, 0));
+  return __ret_631;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_632) {
+  int32x4_t __rev0_632;  __rev0_632 = __builtin_shufflevector(__p0_632, __p0_632, 3, 2, 1, 0);
+  int64x2_t __ret_632;
+  int32x2_t __a1_632 = __noswap_vget_high_s32(__rev0_632);
+  __ret_632 = (int64x2_t)(__noswap_vshll_n_s32(__a1_632, 0));
+  __ret_632 = __builtin_shufflevector(__ret_632, __ret_632, 1, 0);
+  return __ret_632;
+}
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_633) {
+  int64x2_t __ret_633;
+  int32x2_t __a1_633 = __noswap_vget_high_s32(__p0_633);
+  __ret_633 = (int64x2_t)(__noswap_vshll_n_s32(__a1_633, 0));
+  return __ret_633;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_634) {
+  int32x4_t __ret_634;
+  int16x4_t __a1_634 = vget_high_s16(__p0_634);
+  __ret_634 = (int32x4_t)(vshll_n_s16(__a1_634, 0));
+  return __ret_634;
+}
+#else
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_635) {
+  int16x8_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__p0_635, __p0_635, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret_635;
+  int16x4_t __a1_635 = __noswap_vget_high_s16(__rev0_635);
+  __ret_635 = (int32x4_t)(__noswap_vshll_n_s16(__a1_635, 0));
+  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0);
+  return __ret_635;
+}
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_636) {
+  int32x4_t __ret_636;
+  int16x4_t __a1_636 = __noswap_vget_high_s16(__p0_636);
+  __ret_636 = (int32x4_t)(__noswap_vshll_n_s16(__a1_636, 0));
+  return __ret_636;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
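
A minimal usage sketch of the vmovl_high_* family renumbered in the hunk above: these intrinsics widen the high half of a 128-bit vector, and the header implements them as a shift-left-long by 0 applied to vget_high_*. The demo_* function name is hypothetical and an AArch64 target is assumed; this is not part of the diff.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmovl_high(void) {
  uint8_t bytes[16] = {0, 1, 2, 3, 4, 5, 6, 7,
                       8, 9, 10, 11, 12, 13, 250, 255};
  uint8x16_t v = vld1q_u8(bytes);

  /* Widen lanes 8..15 to 16 bits; for unsigned types the
     vshll_n_u8(..., 0) used in the header is a plain zero-extension. */
  uint16x8_t wide = vmovl_high_u8(v);

  assert(vgetq_lane_u16(wide, 6) == 250);
  assert(vgetq_lane_u16(wide, 7) == 255);
}
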
@@ -56550,29 +56738,29 @@
   __ret = __p0 * __p1;
   return __ret;
 }
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
+#define vmuld_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  float64_t __s0_637 = __p0_637; \
+  float64x1_t __s1_637 = __p1_637; \
+  float64_t __ret_637; \
+  __ret_637 = __s0_637 * vget_lane_f64(__s1_637, __p2_637); \
+  __ret_637; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
+#define vmuls_lane_f32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  float32_t __s0_638 = __p0_638; \
+  float32x2_t __s1_638 = __p1_638; \
+  float32_t __ret_638; \
+  __ret_638 = __s0_638 * vget_lane_f32(__s1_638, __p2_638); \
+  __ret_638; \
 })
 #else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
+#define vmuls_lane_f32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  float32_t __s0_639 = __p0_639; \
+  float32x2_t __s1_639 = __p1_639; \
+  float32x2_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \
+  float32_t __ret_639; \
+  __ret_639 = __s0_639 * __noswap_vget_lane_f32(__rev1_639, __p2_639); \
+  __ret_639; \
 })
 #endif
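
A minimal sketch of the scalar-by-lane multiply macros renumbered above: vmuls_lane_f32(s, v, N) multiplies the scalar s by lane N of a float32x2_t, and on big-endian targets the header first reverses the vector so lane numbering matches the architectural order. The demo_* name is hypothetical; an AArch64 target is assumed.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmuls_lane(void) {
  float32x2_t v = vdup_n_f32(0.0f);
  v = vset_lane_f32(2.0f, v, 0);
  v = vset_lane_f32(4.0f, v, 1);

  /* Multiply the scalar 3.0 by lane 1 of v (== 4.0). */
  float32_t r = vmuls_lane_f32(3.0f, v, 1);
  assert(r == 12.0f);
}
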
 
@@ -56584,60 +56772,60 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
+#define vmulq_lane_f64(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  float64x2_t __s0_640 = __p0_640; \
+  float64x1_t __s1_640 = __p1_640; \
+  float64x2_t __ret_640; \
+  __ret_640 = __s0_640 * splatq_lane_f64(__s1_640, __p2_640); \
+  __ret_640; \
 })
 #else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
+#define vmulq_lane_f64(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  float64x2_t __s0_641 = __p0_641; \
+  float64x1_t __s1_641 = __p1_641; \
+  float64x2_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \
+  float64x2_t __ret_641; \
+  __ret_641 = __rev0_641 * __noswap_splatq_lane_f64(__s1_641, __p2_641); \
+  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \
+  __ret_641; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
+#define vmuld_laneq_f64(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  float64_t __s0_642 = __p0_642; \
+  float64x2_t __s1_642 = __p1_642; \
+  float64_t __ret_642; \
+  __ret_642 = __s0_642 * vgetq_lane_f64(__s1_642, __p2_642); \
+  __ret_642; \
 })
 #else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
+#define vmuld_laneq_f64(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  float64_t __s0_643 = __p0_643; \
+  float64x2_t __s1_643 = __p1_643; \
+  float64x2_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 1, 0); \
+  float64_t __ret_643; \
+  __ret_643 = __s0_643 * __noswap_vgetq_lane_f64(__rev1_643, __p2_643); \
+  __ret_643; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
+#define vmuls_laneq_f32(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  float32_t __s0_644 = __p0_644; \
+  float32x4_t __s1_644 = __p1_644; \
+  float32_t __ret_644; \
+  __ret_644 = __s0_644 * vgetq_lane_f32(__s1_644, __p2_644); \
+  __ret_644; \
 })
 #else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
+#define vmuls_laneq_f32(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  float32_t __s0_645 = __p0_645; \
+  float32x4_t __s1_645 = __p1_645; \
+  float32x4_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \
+  float32_t __ret_645; \
+  __ret_645 = __s0_645 * __noswap_vgetq_lane_f32(__rev1_645, __p2_645); \
+  __ret_645; \
 })
 #endif
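
A sketch of the *_laneq scalar variants above, where the lane source is a 128-bit vector (so vgetq_lane_* rather than vget_lane_* is used internally). Hypothetical demo_* name; AArch64 assumed.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmuld_laneq(void) {
  float64x2_t v = vdupq_n_f64(0.0);
  v = vsetq_lane_f64(2.5, v, 0);
  v = vsetq_lane_f64(4.0, v, 1);

  /* Multiply the scalar 2.0 by lane 1 of the 128-bit vector (== 4.0). */
  float64_t r = vmuld_laneq_f64(2.0, v, 1);
  assert(r == 8.0);
}
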
 
@@ -56661,236 +56849,236 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
+#define vmulq_laneq_u32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  uint32x4_t __s0_646 = __p0_646; \
+  uint32x4_t __s1_646 = __p1_646; \
+  uint32x4_t __ret_646; \
+  __ret_646 = __s0_646 * splatq_laneq_u32(__s1_646, __p2_646); \
   __ret_646; \
 })
 #else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
+#define vmulq_laneq_u32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  uint32x4_t __s0_647 = __p0_647; \
+  uint32x4_t __s1_647 = __p1_647; \
+  uint32x4_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \
+  uint32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
+  uint32x4_t __ret_647; \
+  __ret_647 = __rev0_647 * __noswap_splatq_laneq_u32(__rev1_647, __p2_647); \
+  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \
   __ret_647; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
+#define vmulq_laneq_u16(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  uint16x8_t __s0_648 = __p0_648; \
+  uint16x8_t __s1_648 = __p1_648; \
+  uint16x8_t __ret_648; \
+  __ret_648 = __s0_648 * splatq_laneq_u16(__s1_648, __p2_648); \
   __ret_648; \
 })
 #else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
+#define vmulq_laneq_u16(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  uint16x8_t __s0_649 = __p0_649; \
+  uint16x8_t __s1_649 = __p1_649; \
+  uint16x8_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_649; \
+  __ret_649 = __rev0_649 * __noswap_splatq_laneq_u16(__rev1_649, __p2_649); \
+  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_649; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
+#define vmulq_laneq_f64(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  float64x2_t __s0_650 = __p0_650; \
+  float64x2_t __s1_650 = __p1_650; \
+  float64x2_t __ret_650; \
+  __ret_650 = __s0_650 * splatq_laneq_f64(__s1_650, __p2_650); \
   __ret_650; \
 })
 #else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
+#define vmulq_laneq_f64(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  float64x2_t __s0_651 = __p0_651; \
+  float64x2_t __s1_651 = __p1_651; \
+  float64x2_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 1, 0); \
+  float64x2_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 1, 0); \
+  float64x2_t __ret_651; \
+  __ret_651 = __rev0_651 * __noswap_splatq_laneq_f64(__rev1_651, __p2_651); \
+  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 1, 0); \
   __ret_651; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  float32x4_t __s0_652 = __p0_652; \
+  float32x4_t __s1_652 = __p1_652; \
+  float32x4_t __ret_652; \
+  __ret_652 = __s0_652 * splatq_laneq_f32(__s1_652, __p2_652); \
+  __ret_652; \
+})
+#else
+#define vmulq_laneq_f32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  float32x4_t __s0_653 = __p0_653; \
+  float32x4_t __s1_653 = __p1_653; \
+  float32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
+  float32x4_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 3, 2, 1, 0); \
+  float32x4_t __ret_653; \
+  __ret_653 = __rev0_653 * __noswap_splatq_laneq_f32(__rev1_653, __p2_653); \
+  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 3, 2, 1, 0); \
+  __ret_653; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s32(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  int32x4_t __s0_654 = __p0_654; \
+  int32x4_t __s1_654 = __p1_654; \
+  int32x4_t __ret_654; \
+  __ret_654 = __s0_654 * splatq_laneq_s32(__s1_654, __p2_654); \
+  __ret_654; \
+})
+#else
+#define vmulq_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  int32x4_t __s0_655 = __p0_655; \
+  int32x4_t __s1_655 = __p1_655; \
+  int32x4_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 3, 2, 1, 0); \
+  int32x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
+  int32x4_t __ret_655; \
+  __ret_655 = __rev0_655 * __noswap_splatq_laneq_s32(__rev1_655, __p2_655); \
+  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
+  __ret_655; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s16(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  int16x8_t __s0_656 = __p0_656; \
+  int16x8_t __s1_656 = __p1_656; \
+  int16x8_t __ret_656; \
+  __ret_656 = __s0_656 * splatq_laneq_s16(__s1_656, __p2_656); \
+  __ret_656; \
+})
+#else
+#define vmulq_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  int16x8_t __s0_657 = __p0_657; \
+  int16x8_t __s1_657 = __p1_657; \
+  int16x8_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_657; \
+  __ret_657 = __rev0_657 * __noswap_splatq_laneq_s16(__rev1_657, __p2_657); \
+  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_657; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u32(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  uint32x2_t __s0_658 = __p0_658; \
+  uint32x4_t __s1_658 = __p1_658; \
+  uint32x2_t __ret_658; \
+  __ret_658 = __s0_658 * splat_laneq_u32(__s1_658, __p2_658); \
+  __ret_658; \
+})
+#else
+#define vmul_laneq_u32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  uint32x2_t __s0_659 = __p0_659; \
+  uint32x4_t __s1_659 = __p1_659; \
+  uint32x2_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \
+  uint32x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
+  uint32x2_t __ret_659; \
+  __ret_659 = __rev0_659 * __noswap_splat_laneq_u32(__rev1_659, __p2_659); \
+  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \
+  __ret_659; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u16(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  uint16x4_t __s0_660 = __p0_660; \
+  uint16x8_t __s1_660 = __p1_660; \
+  uint16x4_t __ret_660; \
+  __ret_660 = __s0_660 * splat_laneq_u16(__s1_660, __p2_660); \
+  __ret_660; \
+})
+#else
+#define vmul_laneq_u16(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  uint16x4_t __s0_661 = __p0_661; \
+  uint16x8_t __s1_661 = __p1_661; \
+  uint16x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
+  uint16x8_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_661; \
+  __ret_661 = __rev0_661 * __noswap_splat_laneq_u16(__rev1_661, __p2_661); \
+  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 3, 2, 1, 0); \
+  __ret_661; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f32(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  float32x2_t __s0_662 = __p0_662; \
+  float32x4_t __s1_662 = __p1_662; \
+  float32x2_t __ret_662; \
+  __ret_662 = __s0_662 * splat_laneq_f32(__s1_662, __p2_662); \
+  __ret_662; \
+})
+#else
+#define vmul_laneq_f32(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  float32x2_t __s0_663 = __p0_663; \
+  float32x4_t __s1_663 = __p1_663; \
+  float32x2_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 1, 0); \
+  float32x4_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \
+  float32x2_t __ret_663; \
+  __ret_663 = __rev0_663 * __noswap_splat_laneq_f32(__rev1_663, __p2_663); \
+  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 1, 0); \
+  __ret_663; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  int32x2_t __s0_664 = __p0_664; \
+  int32x4_t __s1_664 = __p1_664; \
+  int32x2_t __ret_664; \
+  __ret_664 = __s0_664 * splat_laneq_s32(__s1_664, __p2_664); \
+  __ret_664; \
+})
+#else
+#define vmul_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  int32x2_t __s0_665 = __p0_665; \
+  int32x4_t __s1_665 = __p1_665; \
+  int32x2_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 1, 0); \
+  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
+  int32x2_t __ret_665; \
+  __ret_665 = __rev0_665 * __noswap_splat_laneq_s32(__rev1_665, __p2_665); \
+  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
+  __ret_665; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  int16x4_t __s0_666 = __p0_666; \
+  int16x8_t __s1_666 = __p1_666; \
+  int16x4_t __ret_666; \
+  __ret_666 = __s0_666 * splat_laneq_s16(__s1_666, __p2_666); \
+  __ret_666; \
+})
+#else
+#define vmul_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  int16x4_t __s0_667 = __p0_667; \
+  int16x8_t __s1_667 = __p1_667; \
+  int16x4_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 3, 2, 1, 0); \
+  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_667; \
+  __ret_667 = __rev0_667 * __noswap_splat_laneq_s16(__rev1_667, __p2_667); \
+  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
+  __ret_667; \
+})
+#endif
+
 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
   float64x1_t __ret;
   __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
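
A sketch of the vector-by-lane multiplies renumbered in the hunk above: the header broadcasts the chosen lane of the second operand with splatq_laneq_* / splat_laneq_* and then performs an element-wise multiply. Hypothetical demo_* name; AArch64 assumed.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmulq_laneq(void) {
  int32_t a[4] = {1, 2, 3, 4};
  int32_t b[4] = {10, 20, 30, 40};
  int32x4_t va = vld1q_s32(a);
  int32x4_t vb = vld1q_s32(b);

  /* Every lane of va is multiplied by lane 2 of vb (== 30). */
  int32x4_t r = vmulq_laneq_s32(va, vb, 2);
  assert(vgetq_lane_s32(r, 0) == 30 && vgetq_lane_s32(r, 3) == 120);
}
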
@@ -57053,170 +57241,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
+#define vmull_high_lane_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  uint32x4_t __s0_668 = __p0_668; \
+  uint32x2_t __s1_668 = __p1_668; \
+  uint64x2_t __ret_668; \
+  __ret_668 = vmull_u32(vget_high_u32(__s0_668), splat_lane_u32(__s1_668, __p2_668)); \
+  __ret_668; \
 })
 #else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
+#define vmull_high_lane_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  uint32x4_t __s0_669 = __p0_669; \
+  uint32x2_t __s1_669 = __p1_669; \
+  uint32x4_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \
+  uint32x2_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \
+  uint64x2_t __ret_669; \
+  __ret_669 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_669), __noswap_splat_lane_u32(__rev1_669, __p2_669)); \
+  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
+  __ret_669; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
+#define vmull_high_lane_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  uint16x8_t __s0_670 = __p0_670; \
+  uint16x4_t __s1_670 = __p1_670; \
+  uint32x4_t __ret_670; \
+  __ret_670 = vmull_u16(vget_high_u16(__s0_670), splat_lane_u16(__s1_670, __p2_670)); \
+  __ret_670; \
 })
 #else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
+#define vmull_high_lane_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  uint16x8_t __s0_671 = __p0_671; \
+  uint16x4_t __s1_671 = __p1_671; \
+  uint16x8_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \
+  uint32x4_t __ret_671; \
+  __ret_671 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_671), __noswap_splat_lane_u16(__rev1_671, __p2_671)); \
+  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
+  __ret_671; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
+#define vmull_high_lane_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  int32x4_t __s0_672 = __p0_672; \
+  int32x2_t __s1_672 = __p1_672; \
+  int64x2_t __ret_672; \
+  __ret_672 = vmull_s32(vget_high_s32(__s0_672), splat_lane_s32(__s1_672, __p2_672)); \
+  __ret_672; \
 })
 #else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
+#define vmull_high_lane_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  int32x4_t __s0_673 = __p0_673; \
+  int32x2_t __s1_673 = __p1_673; \
+  int32x4_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \
+  int32x2_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 1, 0); \
+  int64x2_t __ret_673; \
+  __ret_673 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_673), __noswap_splat_lane_s32(__rev1_673, __p2_673)); \
+  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
+  __ret_673; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
+#define vmull_high_lane_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  int16x8_t __s0_674 = __p0_674; \
+  int16x4_t __s1_674 = __p1_674; \
+  int32x4_t __ret_674; \
+  __ret_674 = vmull_s16(vget_high_s16(__s0_674), splat_lane_s16(__s1_674, __p2_674)); \
+  __ret_674; \
 })
 #else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
+#define vmull_high_lane_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  int16x8_t __s0_675 = __p0_675; \
+  int16x4_t __s1_675 = __p1_675; \
+  int16x8_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 3, 2, 1, 0); \
+  int32x4_t __ret_675; \
+  __ret_675 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_675), __noswap_splat_lane_s16(__rev1_675, __p2_675)); \
+  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
+  __ret_675; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
+#define vmull_high_laneq_u32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  uint32x4_t __s0_676 = __p0_676; \
+  uint32x4_t __s1_676 = __p1_676; \
+  uint64x2_t __ret_676; \
+  __ret_676 = vmull_u32(vget_high_u32(__s0_676), splat_laneq_u32(__s1_676, __p2_676)); \
+  __ret_676; \
 })
 #else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
+#define vmull_high_laneq_u32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  uint32x4_t __s0_677 = __p0_677; \
+  uint32x4_t __s1_677 = __p1_677; \
+  uint32x4_t __rev0_677;  __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \
+  uint32x4_t __rev1_677;  __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \
+  uint64x2_t __ret_677; \
+  __ret_677 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_677), __noswap_splat_laneq_u32(__rev1_677, __p2_677)); \
+  __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \
+  __ret_677; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
+#define vmull_high_laneq_u16(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  uint16x8_t __s0_678 = __p0_678; \
+  uint16x8_t __s1_678 = __p1_678; \
+  uint32x4_t __ret_678; \
+  __ret_678 = vmull_u16(vget_high_u16(__s0_678), splat_laneq_u16(__s1_678, __p2_678)); \
+  __ret_678; \
 })
 #else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
+#define vmull_high_laneq_u16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  uint16x8_t __s0_679 = __p0_679; \
+  uint16x8_t __s1_679 = __p1_679; \
+  uint16x8_t __rev0_679;  __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_679;  __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_679; \
+  __ret_679 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_679), __noswap_splat_laneq_u16(__rev1_679, __p2_679)); \
+  __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \
+  __ret_679; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
+#define vmull_high_laneq_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  int32x4_t __s0_680 = __p0_680; \
+  int32x4_t __s1_680 = __p1_680; \
+  int64x2_t __ret_680; \
+  __ret_680 = vmull_s32(vget_high_s32(__s0_680), splat_laneq_s32(__s1_680, __p2_680)); \
+  __ret_680; \
 })
 #else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
+#define vmull_high_laneq_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  int32x4_t __s0_681 = __p0_681; \
+  int32x4_t __s1_681 = __p1_681; \
+  int32x4_t __rev0_681;  __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 3, 2, 1, 0); \
+  int32x4_t __rev1_681;  __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \
+  int64x2_t __ret_681; \
+  __ret_681 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_681), __noswap_splat_laneq_s32(__rev1_681, __p2_681)); \
+  __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \
+  __ret_681; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
+#define vmull_high_laneq_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  int16x8_t __s0_682 = __p0_682; \
+  int16x8_t __s1_682 = __p1_682; \
+  int32x4_t __ret_682; \
+  __ret_682 = vmull_s16(vget_high_s16(__s0_682), splat_laneq_s16(__s1_682, __p2_682)); \
+  __ret_682; \
 })
 #else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
+#define vmull_high_laneq_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  int16x8_t __s0_683 = __p0_683; \
+  int16x8_t __s1_683 = __p1_683; \
+  int16x8_t __rev0_683;  __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_683;  __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_683; \
+  __ret_683 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_683), __noswap_splat_laneq_s16(__rev1_683, __p2_683)); \
+  __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \
+  __ret_683; \
 })
 #endif
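
A sketch of the vmull_high_lane_* pattern covered by the hunk above: the high half of a 128-bit vector is multiplied by one lane of the second operand, producing a result twice as wide (here u32 x u32 -> u64), which is why the intermediate products cannot overflow. Hypothetical demo_* name; AArch64 assumed.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmull_high_lane(void) {
  uint32_t a[4] = {1, 2, 100000, 200000};
  uint32_t b[2] = {3, 100000};
  uint32x4_t va = vld1q_u32(a);
  uint32x2_t vb = vld1_u32(b);

  /* Lanes 2 and 3 of va times lane 1 of vb, widened to 64 bits;
     200000 * 100000 exceeds uint32_t but fits the uint64_t lanes. */
  uint64x2_t r = vmull_high_lane_u32(va, vb, 1);
  assert(vgetq_lane_u64(r, 1) == 20000000000ULL);
}
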
 
@@ -57285,86 +57473,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
+#define vmull_laneq_u32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  uint32x2_t __s0_684 = __p0_684; \
+  uint32x4_t __s1_684 = __p1_684; \
+  uint64x2_t __ret_684; \
+  __ret_684 = vmull_u32(__s0_684, splat_laneq_u32(__s1_684, __p2_684)); \
+  __ret_684; \
 })
 #else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
+#define vmull_laneq_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  uint32x2_t __s0_685 = __p0_685; \
+  uint32x4_t __s1_685 = __p1_685; \
+  uint32x2_t __rev0_685;  __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \
+  uint32x4_t __rev1_685;  __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \
+  uint64x2_t __ret_685; \
+  __ret_685 = __noswap_vmull_u32(__rev0_685, __noswap_splat_laneq_u32(__rev1_685, __p2_685)); \
+  __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \
+  __ret_685; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
+#define vmull_laneq_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  uint16x4_t __s0_686 = __p0_686; \
+  uint16x8_t __s1_686 = __p1_686; \
+  uint32x4_t __ret_686; \
+  __ret_686 = vmull_u16(__s0_686, splat_laneq_u16(__s1_686, __p2_686)); \
+  __ret_686; \
 })
 #else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
+#define vmull_laneq_u16(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  uint16x4_t __s0_687 = __p0_687; \
+  uint16x8_t __s1_687 = __p1_687; \
+  uint16x4_t __rev0_687;  __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \
+  uint16x8_t __rev1_687;  __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_687; \
+  __ret_687 = __noswap_vmull_u16(__rev0_687, __noswap_splat_laneq_u16(__rev1_687, __p2_687)); \
+  __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \
+  __ret_687; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
+#define vmull_laneq_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  int32x2_t __s0_688 = __p0_688; \
+  int32x4_t __s1_688 = __p1_688; \
+  int64x2_t __ret_688; \
+  __ret_688 = vmull_s32(__s0_688, splat_laneq_s32(__s1_688, __p2_688)); \
+  __ret_688; \
 })
 #else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
+#define vmull_laneq_s32(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  int32x2_t __s0_689 = __p0_689; \
+  int32x4_t __s1_689 = __p1_689; \
+  int32x2_t __rev0_689;  __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 1, 0); \
+  int32x4_t __rev1_689;  __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \
+  int64x2_t __ret_689; \
+  __ret_689 = __noswap_vmull_s32(__rev0_689, __noswap_splat_laneq_s32(__rev1_689, __p2_689)); \
+  __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 1, 0); \
+  __ret_689; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
+#define vmull_laneq_s16(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  int16x4_t __s0_690 = __p0_690; \
+  int16x8_t __s1_690 = __p1_690; \
+  int32x4_t __ret_690; \
+  __ret_690 = vmull_s16(__s0_690, splat_laneq_s16(__s1_690, __p2_690)); \
+  __ret_690; \
 })
 #else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
+#define vmull_laneq_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  int16x4_t __s0_691 = __p0_691; \
+  int16x8_t __s1_691 = __p1_691; \
+  int16x4_t __rev0_691;  __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 3, 2, 1, 0); \
+  int16x8_t __rev1_691;  __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_691; \
+  __ret_691 = __noswap_vmull_s16(__rev0_691, __noswap_splat_laneq_s16(__rev1_691, __p2_691)); \
+  __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \
+  __ret_691; \
 })
 #endif
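
A sketch of the vmull_laneq_* variants above: like vmull_lane_*, but the lane operand is a 128-bit vector, so for 16-bit elements lanes 0..7 are addressable. Hypothetical demo_* name; AArch64 assumed.

#include <arm_neon.h>
#include <assert.h>

static void demo_vmull_laneq(void) {
  int16_t a[4] = {1, -2, 3, -4};
  int16_t b[8] = {0, 0, 0, 0, 0, 0, 1000, -1000};
  int16x4_t va = vld1_s16(a);
  int16x8_t vb = vld1q_s16(b);

  /* 16-bit x 16-bit -> 32-bit widening multiply by lane 7 (== -1000). */
  int32x4_t r = vmull_laneq_s16(va, vb, 7);
  assert(vgetq_lane_s32(r, 0) == -1000 && vgetq_lane_s32(r, 1) == 2000);
}
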
 
@@ -57449,196 +57637,196 @@
   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
   return __ret;
 }
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
+#define vmulxd_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  float64_t __s0_692 = __p0_692; \
+  float64x1_t __s1_692 = __p1_692; \
+  float64_t __ret_692; \
+  __ret_692 = vmulxd_f64(__s0_692, vget_lane_f64(__s1_692, __p2_692)); \
   __ret_692; \
 })
-#endif
-
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
+#define vmulxs_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  float32_t __s0_693 = __p0_693; \
+  float32x2_t __s1_693 = __p1_693; \
+  float32_t __ret_693; \
+  __ret_693 = vmulxs_f32(__s0_693, vget_lane_f32(__s1_693, __p2_693)); \
   __ret_693; \
 })
 #else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
+#define vmulxs_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  float32_t __s0_694 = __p0_694; \
+  float32x2_t __s1_694 = __p1_694; \
+  float32x2_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \
+  float32_t __ret_694; \
+  __ret_694 = vmulxs_f32(__s0_694, __noswap_vget_lane_f32(__rev1_694, __p2_694)); \
   __ret_694; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  float64x2_t __s0_695 = __p0_695; \
+  float64x1_t __s1_695 = __p1_695; \
+  float64x2_t __ret_695; \
+  __ret_695 = vmulxq_f64(__s0_695, splatq_lane_f64(__s1_695, __p2_695)); \
+  __ret_695; \
+})
+#else
+#define vmulxq_lane_f64(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  float64x2_t __s0_696 = __p0_696; \
+  float64x1_t __s1_696 = __p1_696; \
+  float64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  float64x2_t __ret_696; \
+  __ret_696 = __noswap_vmulxq_f64(__rev0_696, __noswap_splatq_lane_f64(__s1_696, __p2_696)); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
+  __ret_696; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f32(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  float32x4_t __s0_697 = __p0_697; \
+  float32x2_t __s1_697 = __p1_697; \
+  float32x4_t __ret_697; \
+  __ret_697 = vmulxq_f32(__s0_697, splatq_lane_f32(__s1_697, __p2_697)); \
+  __ret_697; \
+})
+#else
+#define vmulxq_lane_f32(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  float32x4_t __s0_698 = __p0_698; \
+  float32x2_t __s1_698 = __p1_698; \
+  float32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
+  float32x2_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \
+  float32x4_t __ret_698; \
+  __ret_698 = __noswap_vmulxq_f32(__rev0_698, __noswap_splatq_lane_f32(__rev1_698, __p2_698)); \
+  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
+  __ret_698; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  float32x2_t __s0_699 = __p0_699; \
+  float32x2_t __s1_699 = __p1_699; \
+  float32x2_t __ret_699; \
+  __ret_699 = vmulx_f32(__s0_699, splat_lane_f32(__s1_699, __p2_699)); \
+  __ret_699; \
+})
+#else
+#define vmulx_lane_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  float32x2_t __s0_700 = __p0_700; \
+  float32x2_t __s1_700 = __p1_700; \
+  float32x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
+  float32x2_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \
+  float32x2_t __ret_700; \
+  __ret_700 = __noswap_vmulx_f32(__rev0_700, __noswap_splat_lane_f32(__rev1_700, __p2_700)); \
+  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
+  __ret_700; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  float64_t __s0_701 = __p0_701; \
+  float64x2_t __s1_701 = __p1_701; \
+  float64_t __ret_701; \
+  __ret_701 = vmulxd_f64(__s0_701, vgetq_lane_f64(__s1_701, __p2_701)); \
+  __ret_701; \
+})
+#else
+#define vmulxd_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  float64_t __s0_702 = __p0_702; \
+  float64x2_t __s1_702 = __p1_702; \
+  float64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  float64_t __ret_702; \
+  __ret_702 = vmulxd_f64(__s0_702, __noswap_vgetq_lane_f64(__rev1_702, __p2_702)); \
+  __ret_702; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  float32_t __s0_703 = __p0_703; \
+  float32x4_t __s1_703 = __p1_703; \
+  float32_t __ret_703; \
+  __ret_703 = vmulxs_f32(__s0_703, vgetq_lane_f32(__s1_703, __p2_703)); \
+  __ret_703; \
+})
+#else
+#define vmulxs_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  float32_t __s0_704 = __p0_704; \
+  float32x4_t __s1_704 = __p1_704; \
+  float32x4_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \
+  float32_t __ret_704; \
+  __ret_704 = vmulxs_f32(__s0_704, __noswap_vgetq_lane_f32(__rev1_704, __p2_704)); \
+  __ret_704; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f64(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  float64x2_t __s0_705 = __p0_705; \
+  float64x2_t __s1_705 = __p1_705; \
+  float64x2_t __ret_705; \
+  __ret_705 = vmulxq_f64(__s0_705, splatq_laneq_f64(__s1_705, __p2_705)); \
+  __ret_705; \
+})
+#else
+#define vmulxq_laneq_f64(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  float64x2_t __s0_706 = __p0_706; \
+  float64x2_t __s1_706 = __p1_706; \
+  float64x2_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \
+  float64x2_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \
+  float64x2_t __ret_706; \
+  __ret_706 = __noswap_vmulxq_f64(__rev0_706, __noswap_splatq_laneq_f64(__rev1_706, __p2_706)); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \
+  __ret_706; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f32(__p0_707, __p1_707, __p2_707) __extension__ ({ \
+  float32x4_t __s0_707 = __p0_707; \
+  float32x4_t __s1_707 = __p1_707; \
+  float32x4_t __ret_707; \
+  __ret_707 = vmulxq_f32(__s0_707, splatq_laneq_f32(__s1_707, __p2_707)); \
+  __ret_707; \
+})
+#else
+#define vmulxq_laneq_f32(__p0_708, __p1_708, __p2_708) __extension__ ({ \
+  float32x4_t __s0_708 = __p0_708; \
+  float32x4_t __s1_708 = __p1_708; \
+  float32x4_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 3, 2, 1, 0); \
+  float32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
+  float32x4_t __ret_708; \
+  __ret_708 = __noswap_vmulxq_f32(__rev0_708, __noswap_splatq_laneq_f32(__rev1_708, __p2_708)); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \
+  __ret_708; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0_709, __p1_709, __p2_709) __extension__ ({ \
+  float32x2_t __s0_709 = __p0_709; \
+  float32x4_t __s1_709 = __p1_709; \
+  float32x2_t __ret_709; \
+  __ret_709 = vmulx_f32(__s0_709, splat_laneq_f32(__s1_709, __p2_709)); \
+  __ret_709; \
+})
+#else
+#define vmulx_laneq_f32(__p0_710, __p1_710, __p2_710) __extension__ ({ \
+  float32x2_t __s0_710 = __p0_710; \
+  float32x4_t __s1_710 = __p1_710; \
+  float32x2_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 1, 0); \
+  float32x4_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \
+  float32x2_t __ret_710; \
+  __ret_710 = __noswap_vmulx_f32(__rev0_710, __noswap_splat_laneq_f32(__rev1_710, __p2_710)); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 1, 0); \
+  __ret_710; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
   float64x2_t __ret;
   __ret = -__p0;
@@ -58537,98 +58725,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
+#define vqdmlal_high_lane_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
+  int64x2_t __s0_711 = __p0_711; \
+  int32x4_t __s1_711 = __p1_711; \
+  int32x2_t __s2_711 = __p2_711; \
+  int64x2_t __ret_711; \
+  __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_lane_s32(__s2_711, __p3_711)); \
+  __ret_711; \
 })
 #else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
+#define vqdmlal_high_lane_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
+  int64x2_t __s0_712 = __p0_712; \
+  int32x4_t __s1_712 = __p1_712; \
+  int32x2_t __s2_712 = __p2_712; \
+  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
+  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
+  int32x2_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 1, 0); \
+  int64x2_t __ret_712; \
+  __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_lane_s32(__rev2_712, __p3_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
+  __ret_712; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
+#define vqdmlal_high_lane_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
+  int32x4_t __s0_713 = __p0_713; \
+  int16x8_t __s1_713 = __p1_713; \
+  int16x4_t __s2_713 = __p2_713; \
+  int32x4_t __ret_713; \
+  __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_lane_s16(__s2_713, __p3_713)); \
+  __ret_713; \
 })
 #else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
+#define vqdmlal_high_lane_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
+  int32x4_t __s0_714 = __p0_714; \
+  int16x8_t __s1_714 = __p1_714; \
+  int16x4_t __s2_714 = __p2_714; \
+  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 3, 2, 1, 0); \
+  int32x4_t __ret_714; \
+  __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_lane_s16(__rev2_714, __p3_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
+  __ret_714; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
+#define vqdmlal_high_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
+  int64x2_t __s0_715 = __p0_715; \
+  int32x4_t __s1_715 = __p1_715; \
+  int32x4_t __s2_715 = __p2_715; \
+  int64x2_t __ret_715; \
+  __ret_715 = vqdmlal_s32(__s0_715, vget_high_s32(__s1_715), splat_laneq_s32(__s2_715, __p3_715)); \
+  __ret_715; \
 })
 #else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
+#define vqdmlal_high_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
+  int64x2_t __s0_716 = __p0_716; \
+  int32x4_t __s1_716 = __p1_716; \
+  int32x4_t __s2_716 = __p2_716; \
+  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
+  int32x4_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 3, 2, 1, 0); \
+  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
+  int64x2_t __ret_716; \
+  __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __noswap_vget_high_s32(__rev1_716), __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
+  __ret_716; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
+#define vqdmlal_high_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
+  int32x4_t __s0_717 = __p0_717; \
+  int16x8_t __s1_717 = __p1_717; \
+  int16x8_t __s2_717 = __p2_717; \
+  int32x4_t __ret_717; \
+  __ret_717 = vqdmlal_s16(__s0_717, vget_high_s16(__s1_717), splat_laneq_s16(__s2_717, __p3_717)); \
+  __ret_717; \
 })
 #else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
+#define vqdmlal_high_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
+  int32x4_t __s0_718 = __p0_718; \
+  int16x8_t __s1_718 = __p1_718; \
+  int16x8_t __s2_718 = __p2_718; \
+  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
+  int16x8_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_718; \
+  __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __noswap_vget_high_s16(__rev1_718), __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
+  __ret_718; \
 })
 #endif
 
@@ -58751,50 +58939,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
+#define vqdmlal_laneq_s32(__p0_719, __p1_719, __p2_719, __p3_719) __extension__ ({ \
+  int64x2_t __s0_719 = __p0_719; \
+  int32x2_t __s1_719 = __p1_719; \
+  int32x4_t __s2_719 = __p2_719; \
+  int64x2_t __ret_719; \
+  __ret_719 = vqdmlal_s32(__s0_719, __s1_719, splat_laneq_s32(__s2_719, __p3_719)); \
+  __ret_719; \
 })
 #else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
+#define vqdmlal_laneq_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \
+  int64x2_t __s0_720 = __p0_720; \
+  int32x2_t __s1_720 = __p1_720; \
+  int32x4_t __s2_720 = __p2_720; \
+  int64x2_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \
+  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
+  int32x4_t __rev2_720;  __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 3, 2, 1, 0); \
+  int64x2_t __ret_720; \
+  __ret_720 = __noswap_vqdmlal_s32(__rev0_720, __rev1_720, __noswap_splat_laneq_s32(__rev2_720, __p3_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
+#define vqdmlal_laneq_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \
+  int32x4_t __s0_721 = __p0_721; \
+  int16x4_t __s1_721 = __p1_721; \
+  int16x8_t __s2_721 = __p2_721; \
+  int32x4_t __ret_721; \
+  __ret_721 = vqdmlal_s16(__s0_721, __s1_721, splat_laneq_s16(__s2_721, __p3_721)); \
+  __ret_721; \
 })
 #else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
+#define vqdmlal_laneq_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \
+  int32x4_t __s0_722 = __p0_722; \
+  int16x4_t __s1_722 = __p1_722; \
+  int16x8_t __s2_722 = __p2_722; \
+  int32x4_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \
+  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
+  int16x8_t __rev2_722;  __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_722; \
+  __ret_722 = __noswap_vqdmlal_s16(__rev0_722, __rev1_722, __noswap_splat_laneq_s16(__rev2_722, __p3_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
 })
 #endif
 
@@ -58845,98 +59033,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
+#define vqdmlsl_high_lane_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \
+  int64x2_t __s0_723 = __p0_723; \
+  int32x4_t __s1_723 = __p1_723; \
+  int32x2_t __s2_723 = __p2_723; \
+  int64x2_t __ret_723; \
+  __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_lane_s32(__s2_723, __p3_723)); \
+  __ret_723; \
 })
 #else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
+#define vqdmlsl_high_lane_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \
+  int64x2_t __s0_724 = __p0_724; \
+  int32x4_t __s1_724 = __p1_724; \
+  int32x2_t __s2_724 = __p2_724; \
+  int64x2_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \
+  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  int32x2_t __rev2_724;  __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 1, 0); \
+  int64x2_t __ret_724; \
+  __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_lane_s32(__rev2_724, __p3_724)); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \
+  __ret_724; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
+#define vqdmlsl_high_lane_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \
+  int32x4_t __s0_725 = __p0_725; \
+  int16x8_t __s1_725 = __p1_725; \
+  int16x4_t __s2_725 = __p2_725; \
+  int32x4_t __ret_725; \
+  __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_lane_s16(__s2_725, __p3_725)); \
+  __ret_725; \
 })
 #else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
+#define vqdmlsl_high_lane_s16(__p0_726, __p1_726, __p2_726, __p3_726) __extension__ ({ \
+  int32x4_t __s0_726 = __p0_726; \
+  int16x8_t __s1_726 = __p1_726; \
+  int16x4_t __s2_726 = __p2_726; \
+  int32x4_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \
+  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_726;  __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 3, 2, 1, 0); \
+  int32x4_t __ret_726; \
+  __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_lane_s16(__rev2_726, __p3_726)); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
+#define vqdmlsl_high_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \
+  int64x2_t __s0_727 = __p0_727; \
+  int32x4_t __s1_727 = __p1_727; \
+  int32x4_t __s2_727 = __p2_727; \
+  int64x2_t __ret_727; \
+  __ret_727 = vqdmlsl_s32(__s0_727, vget_high_s32(__s1_727), splat_laneq_s32(__s2_727, __p3_727)); \
+  __ret_727; \
 })
 #else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
+#define vqdmlsl_high_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \
+  int64x2_t __s0_728 = __p0_728; \
+  int32x4_t __s1_728 = __p1_728; \
+  int32x4_t __s2_728 = __p2_728; \
+  int64x2_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \
+  int32x4_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \
+  int32x4_t __rev2_728;  __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \
+  int64x2_t __ret_728; \
+  __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __noswap_vget_high_s32(__rev1_728), __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
+  __ret_728; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
+#define vqdmlsl_high_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \
+  int32x4_t __s0_729 = __p0_729; \
+  int16x8_t __s1_729 = __p1_729; \
+  int16x8_t __s2_729 = __p2_729; \
+  int32x4_t __ret_729; \
+  __ret_729 = vqdmlsl_s16(__s0_729, vget_high_s16(__s1_729), splat_laneq_s16(__s2_729, __p3_729)); \
+  __ret_729; \
 })
 #else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
+#define vqdmlsl_high_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \
+  int32x4_t __s0_730 = __p0_730; \
+  int16x8_t __s1_730 = __p1_730; \
+  int16x8_t __s2_730 = __p2_730; \
+  int32x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int16x8_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_730;  __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_730; \
+  __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __noswap_vget_high_s16(__rev1_730), __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
+  __ret_730; \
 })
 #endif
 
@@ -59059,50 +59247,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
+#define vqdmlsl_laneq_s32(__p0_731, __p1_731, __p2_731, __p3_731) __extension__ ({ \
+  int64x2_t __s0_731 = __p0_731; \
+  int32x2_t __s1_731 = __p1_731; \
+  int32x4_t __s2_731 = __p2_731; \
+  int64x2_t __ret_731; \
+  __ret_731 = vqdmlsl_s32(__s0_731, __s1_731, splat_laneq_s32(__s2_731, __p3_731)); \
+  __ret_731; \
 })
 #else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
+#define vqdmlsl_laneq_s32(__p0_732, __p1_732, __p2_732, __p3_732) __extension__ ({ \
+  int64x2_t __s0_732 = __p0_732; \
+  int32x2_t __s1_732 = __p1_732; \
+  int32x4_t __s2_732 = __p2_732; \
+  int64x2_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \
+  int32x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  int32x4_t __rev2_732;  __rev2_732 = __builtin_shufflevector(__s2_732, __s2_732, 3, 2, 1, 0); \
+  int64x2_t __ret_732; \
+  __ret_732 = __noswap_vqdmlsl_s32(__rev0_732, __rev1_732, __noswap_splat_laneq_s32(__rev2_732, __p3_732)); \
+  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
+  __ret_732; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
+#define vqdmlsl_laneq_s16(__p0_733, __p1_733, __p2_733, __p3_733) __extension__ ({ \
+  int32x4_t __s0_733 = __p0_733; \
+  int16x4_t __s1_733 = __p1_733; \
+  int16x8_t __s2_733 = __p2_733; \
+  int32x4_t __ret_733; \
+  __ret_733 = vqdmlsl_s16(__s0_733, __s1_733, splat_laneq_s16(__s2_733, __p3_733)); \
+  __ret_733; \
 })
 #else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
+#define vqdmlsl_laneq_s16(__p0_734, __p1_734, __p2_734, __p3_734) __extension__ ({ \
+  int32x4_t __s0_734 = __p0_734; \
+  int16x4_t __s1_734 = __p1_734; \
+  int16x8_t __s2_734 = __p2_734; \
+  int32x4_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \
+  int16x4_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \
+  int16x8_t __rev2_734;  __rev2_734 = __builtin_shufflevector(__s2_734, __s2_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_734; \
+  __ret_734 = __noswap_vqdmlsl_s16(__rev0_734, __rev1_734, __noswap_splat_laneq_s16(__rev2_734, __p3_734)); \
+  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
+  __ret_734; \
 })
 #endif
 
@@ -59201,78 +59389,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
+#define vqdmulhs_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
+  int32_t __s0_735 = __p0_735; \
+  int32x2_t __s1_735 = __p1_735; \
+  int32_t __ret_735; \
+  __ret_735 = vqdmulhs_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
+  __ret_735; \
 })
 #else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
+#define vqdmulhs_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
+  int32_t __s0_736 = __p0_736; \
+  int32x2_t __s1_736 = __p1_736; \
+  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
+  int32_t __ret_736; \
+  __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
+  __ret_736; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
+#define vqdmulhh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
+  int16_t __s0_737 = __p0_737; \
+  int16x4_t __s1_737 = __p1_737; \
+  int16_t __ret_737; \
+  __ret_737 = vqdmulhh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
+  __ret_737; \
 })
 #else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
+#define vqdmulhh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
+  int16_t __s0_738 = __p0_738; \
+  int16x4_t __s1_738 = __p1_738; \
+  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
+  int16_t __ret_738; \
+  __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
+  __ret_738; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
+#define vqdmulhs_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
+  int32_t __s0_739 = __p0_739; \
+  int32x4_t __s1_739 = __p1_739; \
+  int32_t __ret_739; \
+  __ret_739 = vqdmulhs_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
+  __ret_739; \
 })
 #else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
+#define vqdmulhs_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
+  int32_t __s0_740 = __p0_740; \
+  int32x4_t __s1_740 = __p1_740; \
+  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
+  int32_t __ret_740; \
+  __ret_740 = vqdmulhs_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
+  __ret_740; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
+#define vqdmulhh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
+  int16_t __s0_741 = __p0_741; \
+  int16x8_t __s1_741 = __p1_741; \
+  int16_t __ret_741; \
+  __ret_741 = vqdmulhh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
+  __ret_741; \
 })
 #else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
+#define vqdmulhh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
+  int16_t __s0_742 = __p0_742; \
+  int16x8_t __s1_742 = __p1_742; \
+  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_742; \
+  __ret_742 = vqdmulhh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
+  __ret_742; \
 })
 #endif
 
@@ -59405,86 +59593,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
+#define vqdmull_high_lane_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
+  int32x4_t __s0_743 = __p0_743; \
+  int32x2_t __s1_743 = __p1_743; \
+  int64x2_t __ret_743; \
+  __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_lane_s32(__s1_743, __p2_743)); \
+  __ret_743; \
 })
 #else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
+#define vqdmull_high_lane_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
+  int32x4_t __s0_744 = __p0_744; \
+  int32x2_t __s1_744 = __p1_744; \
+  int32x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  int32x2_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 1, 0); \
+  int64x2_t __ret_744; \
+  __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_lane_s32(__rev1_744, __p2_744)); \
+  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
+  __ret_744; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
+#define vqdmull_high_lane_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
+  int16x8_t __s0_745 = __p0_745; \
+  int16x4_t __s1_745 = __p1_745; \
+  int32x4_t __ret_745; \
+  __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_lane_s16(__s1_745, __p2_745)); \
+  __ret_745; \
 })
 #else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
+#define vqdmull_high_lane_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
+  int16x8_t __s0_746 = __p0_746; \
+  int16x4_t __s1_746 = __p1_746; \
+  int16x8_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 3, 2, 1, 0); \
+  int32x4_t __ret_746; \
+  __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_lane_s16(__rev1_746, __p2_746)); \
+  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
+  __ret_746; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
+#define vqdmull_high_laneq_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
+  int32x4_t __s0_747 = __p0_747; \
+  int32x4_t __s1_747 = __p1_747; \
+  int64x2_t __ret_747; \
+  __ret_747 = vqdmull_s32(vget_high_s32(__s0_747), splat_laneq_s32(__s1_747, __p2_747)); \
+  __ret_747; \
 })
 #else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
+#define vqdmull_high_laneq_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
+  int32x4_t __s0_748 = __p0_748; \
+  int32x4_t __s1_748 = __p1_748; \
+  int32x4_t __rev0_748;  __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \
+  int32x4_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 3, 2, 1, 0); \
+  int64x2_t __ret_748; \
+  __ret_748 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_748), __noswap_splat_laneq_s32(__rev1_748, __p2_748)); \
+  __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 1, 0); \
+  __ret_748; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
+#define vqdmull_high_laneq_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
+  int16x8_t __s0_749 = __p0_749; \
+  int16x8_t __s1_749 = __p1_749; \
+  int32x4_t __ret_749; \
+  __ret_749 = vqdmull_s16(vget_high_s16(__s0_749), splat_laneq_s16(__s1_749, __p2_749)); \
+  __ret_749; \
 })
 #else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
+#define vqdmull_high_laneq_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
+  int16x8_t __s0_750 = __p0_750; \
+  int16x8_t __s1_750 = __p1_750; \
+  int16x8_t __rev0_750;  __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_750; \
+  __ret_750 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_750), __noswap_splat_laneq_s16(__rev1_750, __p2_750)); \
+  __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \
+  __ret_750; \
 })
 #endif
 
@@ -59521,120 +59709,120 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
+#define vqdmulls_lane_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
+  int32_t __s0_751 = __p0_751; \
+  int32x2_t __s1_751 = __p1_751; \
+  int64_t __ret_751; \
+  __ret_751 = vqdmulls_s32(__s0_751, vget_lane_s32(__s1_751, __p2_751)); \
+  __ret_751; \
 })
 #else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
+#define vqdmulls_lane_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
+  int32_t __s0_752 = __p0_752; \
+  int32x2_t __s1_752 = __p1_752; \
+  int32x2_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 1, 0); \
+  int64_t __ret_752; \
+  __ret_752 = vqdmulls_s32(__s0_752, __noswap_vget_lane_s32(__rev1_752, __p2_752)); \
+  __ret_752; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
+#define vqdmullh_lane_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
+  int16_t __s0_753 = __p0_753; \
+  int16x4_t __s1_753 = __p1_753; \
+  int32_t __ret_753; \
+  __ret_753 = vqdmullh_s16(__s0_753, vget_lane_s16(__s1_753, __p2_753)); \
+  __ret_753; \
 })
 #else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
+#define vqdmullh_lane_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
+  int16_t __s0_754 = __p0_754; \
+  int16x4_t __s1_754 = __p1_754; \
+  int16x4_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 3, 2, 1, 0); \
+  int32_t __ret_754; \
+  __ret_754 = vqdmullh_s16(__s0_754, __noswap_vget_lane_s16(__rev1_754, __p2_754)); \
+  __ret_754; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
+#define vqdmulls_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
+  int32_t __s0_755 = __p0_755; \
+  int32x4_t __s1_755 = __p1_755; \
+  int64_t __ret_755; \
+  __ret_755 = vqdmulls_s32(__s0_755, vgetq_lane_s32(__s1_755, __p2_755)); \
+  __ret_755; \
 })
 #else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
+#define vqdmulls_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
+  int32_t __s0_756 = __p0_756; \
+  int32x4_t __s1_756 = __p1_756; \
+  int32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  int64_t __ret_756; \
+  __ret_756 = vqdmulls_s32(__s0_756, __noswap_vgetq_lane_s32(__rev1_756, __p2_756)); \
+  __ret_756; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
+#define vqdmullh_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \
+  int16_t __s0_757 = __p0_757; \
+  int16x8_t __s1_757 = __p1_757; \
+  int32_t __ret_757; \
+  __ret_757 = vqdmullh_s16(__s0_757, vgetq_lane_s16(__s1_757, __p2_757)); \
+  __ret_757; \
 })
 #else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
+#define vqdmullh_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \
+  int16_t __s0_758 = __p0_758; \
+  int16x8_t __s1_758 = __p1_758; \
+  int16x8_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32_t __ret_758; \
+  __ret_758 = vqdmullh_s16(__s0_758, __noswap_vgetq_lane_s16(__rev1_758, __p2_758)); \
+  __ret_758; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
+#define vqdmull_laneq_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \
+  int32x2_t __s0_759 = __p0_759; \
+  int32x4_t __s1_759 = __p1_759; \
+  int64x2_t __ret_759; \
+  __ret_759 = vqdmull_s32(__s0_759, splat_laneq_s32(__s1_759, __p2_759)); \
+  __ret_759; \
 })
 #else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
+#define vqdmull_laneq_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \
+  int32x2_t __s0_760 = __p0_760; \
+  int32x4_t __s1_760 = __p1_760; \
+  int32x2_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 1, 0); \
+  int32x4_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \
+  int64x2_t __ret_760; \
+  __ret_760 = __noswap_vqdmull_s32(__rev0_760, __noswap_splat_laneq_s32(__rev1_760, __p2_760)); \
+  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 1, 0); \
+  __ret_760; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
+#define vqdmull_laneq_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \
+  int16x4_t __s0_761 = __p0_761; \
+  int16x8_t __s1_761 = __p1_761; \
+  int32x4_t __ret_761; \
+  __ret_761 = vqdmull_s16(__s0_761, splat_laneq_s16(__s1_761, __p2_761)); \
+  __ret_761; \
 })
 #else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
+#define vqdmull_laneq_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \
+  int16x4_t __s0_762 = __p0_762; \
+  int16x8_t __s1_762 = __p1_762; \
+  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
+  int16x8_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_762; \
+  __ret_762 = __noswap_vqdmull_s16(__rev0_762, __noswap_splat_laneq_s16(__rev1_762, __p2_762)); \
+  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 3, 2, 1, 0); \
+  __ret_762; \
 })
 #endif
 
@@ -59972,78 +60160,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
+#define vqrdmulhs_lane_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \
+  int32_t __s0_763 = __p0_763; \
+  int32x2_t __s1_763 = __p1_763; \
+  int32_t __ret_763; \
+  __ret_763 = vqrdmulhs_s32(__s0_763, vget_lane_s32(__s1_763, __p2_763)); \
+  __ret_763; \
 })
 #else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
+#define vqrdmulhs_lane_s32(__p0_764, __p1_764, __p2_764) __extension__ ({ \
+  int32_t __s0_764 = __p0_764; \
+  int32x2_t __s1_764 = __p1_764; \
+  int32x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
+  int32_t __ret_764; \
+  __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vget_lane_s32(__rev1_764, __p2_764)); \
+  __ret_764; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
+#define vqrdmulhh_lane_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  int16_t __s0_765 = __p0_765; \
+  int16x4_t __s1_765 = __p1_765; \
+  int16_t __ret_765; \
+  __ret_765 = vqrdmulhh_s16(__s0_765, vget_lane_s16(__s1_765, __p2_765)); \
+  __ret_765; \
 })
 #else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
+#define vqrdmulhh_lane_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  int16_t __s0_766 = __p0_766; \
+  int16x4_t __s1_766 = __p1_766; \
+  int16x4_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \
+  int16_t __ret_766; \
+  __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vget_lane_s16(__rev1_766, __p2_766)); \
+  __ret_766; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
+#define vqrdmulhs_laneq_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  int32_t __s0_767 = __p0_767; \
+  int32x4_t __s1_767 = __p1_767; \
+  int32_t __ret_767; \
+  __ret_767 = vqrdmulhs_s32(__s0_767, vgetq_lane_s32(__s1_767, __p2_767)); \
+  __ret_767; \
 })
 #else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
+#define vqrdmulhs_laneq_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  int32_t __s0_768 = __p0_768; \
+  int32x4_t __s1_768 = __p1_768; \
+  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
+  int32_t __ret_768; \
+  __ret_768 = vqrdmulhs_s32(__s0_768, __noswap_vgetq_lane_s32(__rev1_768, __p2_768)); \
+  __ret_768; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
+#define vqrdmulhh_laneq_s16(__p0_769, __p1_769, __p2_769) __extension__ ({ \
+  int16_t __s0_769 = __p0_769; \
+  int16x8_t __s1_769 = __p1_769; \
+  int16_t __ret_769; \
+  __ret_769 = vqrdmulhh_s16(__s0_769, vgetq_lane_s16(__s1_769, __p2_769)); \
+  __ret_769; \
 })
 #else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
+#define vqrdmulhh_laneq_s16(__p0_770, __p1_770, __p2_770) __extension__ ({ \
+  int16_t __s0_770 = __p0_770; \
+  int16x8_t __s1_770 = __p1_770; \
+  int16x8_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_770; \
+  __ret_770 = vqrdmulhh_s16(__s0_770, __noswap_vgetq_lane_s16(__rev1_770, __p2_770)); \
+  __ret_770; \
 })
 #endif
 
@@ -60172,128 +60360,128 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
+#define vqrshrn_high_n_u32(__p0_771, __p1_771, __p2_771) __extension__ ({ \
+  uint16x4_t __s0_771 = __p0_771; \
+  uint32x4_t __s1_771 = __p1_771; \
+  uint16x8_t __ret_771; \
+  __ret_771 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_771), (uint16x4_t)(vqrshrn_n_u32(__s1_771, __p2_771)))); \
+  __ret_771; \
 })
 #else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
+#define vqrshrn_high_n_u32(__p0_772, __p1_772, __p2_772) __extension__ ({ \
+  uint16x4_t __s0_772 = __p0_772; \
+  uint32x4_t __s1_772 = __p1_772; \
+  uint16x4_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \
+  uint32x4_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \
+  uint16x8_t __ret_772; \
+  __ret_772 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_772), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_772, __p2_772)))); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
+#define vqrshrn_high_n_u64(__p0_773, __p1_773, __p2_773) __extension__ ({ \
+  uint32x2_t __s0_773 = __p0_773; \
+  uint64x2_t __s1_773 = __p1_773; \
+  uint32x4_t __ret_773; \
+  __ret_773 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_773), (uint32x2_t)(vqrshrn_n_u64(__s1_773, __p2_773)))); \
+  __ret_773; \
 })
 #else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
+#define vqrshrn_high_n_u64(__p0_774, __p1_774, __p2_774) __extension__ ({ \
+  uint32x2_t __s0_774 = __p0_774; \
+  uint64x2_t __s1_774 = __p1_774; \
+  uint32x2_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 1, 0); \
+  uint64x2_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 1, 0); \
+  uint32x4_t __ret_774; \
+  __ret_774 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_774), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_774, __p2_774)))); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \
+  __ret_774; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
+#define vqrshrn_high_n_u16(__p0_775, __p1_775, __p2_775) __extension__ ({ \
+  uint8x8_t __s0_775 = __p0_775; \
+  uint16x8_t __s1_775 = __p1_775; \
+  uint8x16_t __ret_775; \
+  __ret_775 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_775), (uint8x8_t)(vqrshrn_n_u16(__s1_775, __p2_775)))); \
+  __ret_775; \
 })
 #else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
+#define vqrshrn_high_n_u16(__p0_776, __p1_776, __p2_776) __extension__ ({ \
+  uint8x8_t __s0_776 = __p0_776; \
+  uint16x8_t __s1_776 = __p1_776; \
+  uint8x8_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_776; \
+  __ret_776 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_776), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_776, __p2_776)))); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_776; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
+#define vqrshrn_high_n_s32(__p0_777, __p1_777, __p2_777) __extension__ ({ \
+  int16x4_t __s0_777 = __p0_777; \
+  int32x4_t __s1_777 = __p1_777; \
+  int16x8_t __ret_777; \
+  __ret_777 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_777), (int16x4_t)(vqrshrn_n_s32(__s1_777, __p2_777)))); \
+  __ret_777; \
 })
 #else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
+#define vqrshrn_high_n_s32(__p0_778, __p1_778, __p2_778) __extension__ ({ \
+  int16x4_t __s0_778 = __p0_778; \
+  int32x4_t __s1_778 = __p1_778; \
+  int16x4_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \
+  int32x4_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 3, 2, 1, 0); \
+  int16x8_t __ret_778; \
+  __ret_778 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_778), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_778, __p2_778)))); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
+#define vqrshrn_high_n_s64(__p0_779, __p1_779, __p2_779) __extension__ ({ \
+  int32x2_t __s0_779 = __p0_779; \
+  int64x2_t __s1_779 = __p1_779; \
+  int32x4_t __ret_779; \
+  __ret_779 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_779), (int32x2_t)(vqrshrn_n_s64(__s1_779, __p2_779)))); \
+  __ret_779; \
 })
 #else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
+#define vqrshrn_high_n_s64(__p0_780, __p1_780, __p2_780) __extension__ ({ \
+  int32x2_t __s0_780 = __p0_780; \
+  int64x2_t __s1_780 = __p1_780; \
+  int32x2_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \
+  int64x2_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 1, 0); \
+  int32x4_t __ret_780; \
+  __ret_780 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_780), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_780, __p2_780)))); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \
+  __ret_780; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
+#define vqrshrn_high_n_s16(__p0_781, __p1_781, __p2_781) __extension__ ({ \
+  int8x8_t __s0_781 = __p0_781; \
+  int16x8_t __s1_781 = __p1_781; \
+  int8x16_t __ret_781; \
+  __ret_781 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_781), (int8x8_t)(vqrshrn_n_s16(__s1_781, __p2_781)))); \
+  __ret_781; \
 })
 #else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
+#define vqrshrn_high_n_s16(__p0_782, __p1_782, __p2_782) __extension__ ({ \
+  int8x8_t __s0_782 = __p0_782; \
+  int16x8_t __s1_782 = __p1_782; \
+  int8x8_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_782; \
+  __ret_782 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_782), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_782, __p2_782)))); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_782; \
 })
 #endif
 
@@ -60334,65 +60522,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
+#define vqrshrun_high_n_s32(__p0_783, __p1_783, __p2_783) __extension__ ({ \
+  int16x4_t __s0_783 = __p0_783; \
+  int32x4_t __s1_783 = __p1_783; \
+  int16x8_t __ret_783; \
+  __ret_783 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_783), (int16x4_t)(vqrshrun_n_s32(__s1_783, __p2_783)))); \
+  __ret_783; \
 })
 #else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
+#define vqrshrun_high_n_s32(__p0_784, __p1_784, __p2_784) __extension__ ({ \
+  int16x4_t __s0_784 = __p0_784; \
+  int32x4_t __s1_784 = __p1_784; \
+  int16x4_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \
+  int32x4_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \
+  int16x8_t __ret_784; \
+  __ret_784 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_784), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_784, __p2_784)))); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
+#define vqrshrun_high_n_s64(__p0_785, __p1_785, __p2_785) __extension__ ({ \
+  int32x2_t __s0_785 = __p0_785; \
+  int64x2_t __s1_785 = __p1_785; \
+  int32x4_t __ret_785; \
+  __ret_785 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_785), (int32x2_t)(vqrshrun_n_s64(__s1_785, __p2_785)))); \
+  __ret_785; \
 })
 #else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
+#define vqrshrun_high_n_s64(__p0_786, __p1_786, __p2_786) __extension__ ({ \
+  int32x2_t __s0_786 = __p0_786; \
+  int64x2_t __s1_786 = __p1_786; \
+  int32x2_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \
+  int64x2_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 1, 0); \
+  int32x4_t __ret_786; \
+  __ret_786 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_786), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_786, __p2_786)))); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \
+  __ret_786; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
+#define vqrshrun_high_n_s16(__p0_787, __p1_787, __p2_787) __extension__ ({ \
+  int8x8_t __s0_787 = __p0_787; \
+  int16x8_t __s1_787 = __p1_787; \
+  int8x16_t __ret_787; \
+  __ret_787 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_787), (int8x8_t)(vqrshrun_n_s16(__s1_787, __p2_787)))); \
+  __ret_787; \
 })
 #else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
+#define vqrshrun_high_n_s16(__p0_788, __p1_788, __p2_788) __extension__ ({ \
+  int8x8_t __s0_788 = __p0_788; \
+  int16x8_t __s1_788 = __p1_788; \
+  int8x8_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_788; \
+  __ret_788 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_788), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_788, __p2_788)))); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_788; \
 })
 #endif
 
@@ -60527,128 +60715,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
+#define vqshrn_high_n_u32(__p0_789, __p1_789, __p2_789) __extension__ ({ \
+  uint16x4_t __s0_789 = __p0_789; \
+  uint32x4_t __s1_789 = __p1_789; \
+  uint16x8_t __ret_789; \
+  __ret_789 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_789), (uint16x4_t)(vqshrn_n_u32(__s1_789, __p2_789)))); \
+  __ret_789; \
 })
 #else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
+#define vqshrn_high_n_u32(__p0_790, __p1_790, __p2_790) __extension__ ({ \
+  uint16x4_t __s0_790 = __p0_790; \
+  uint32x4_t __s1_790 = __p1_790; \
+  uint16x4_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \
+  uint32x4_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 3, 2, 1, 0); \
+  uint16x8_t __ret_790; \
+  __ret_790 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_790), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_790, __p2_790)))); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
+#define vqshrn_high_n_u64(__p0_791, __p1_791, __p2_791) __extension__ ({ \
+  uint32x2_t __s0_791 = __p0_791; \
+  uint64x2_t __s1_791 = __p1_791; \
+  uint32x4_t __ret_791; \
+  __ret_791 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_791), (uint32x2_t)(vqshrn_n_u64(__s1_791, __p2_791)))); \
+  __ret_791; \
 })
 #else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
+#define vqshrn_high_n_u64(__p0_792, __p1_792, __p2_792) __extension__ ({ \
+  uint32x2_t __s0_792 = __p0_792; \
+  uint64x2_t __s1_792 = __p1_792; \
+  uint32x2_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \
+  uint64x2_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 1, 0); \
+  uint32x4_t __ret_792; \
+  __ret_792 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_792), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_792, __p2_792)))); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 3, 2, 1, 0); \
+  __ret_792; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
+#define vqshrn_high_n_u16(__p0_793, __p1_793, __p2_793) __extension__ ({ \
+  uint8x8_t __s0_793 = __p0_793; \
+  uint16x8_t __s1_793 = __p1_793; \
+  uint8x16_t __ret_793; \
+  __ret_793 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_793), (uint8x8_t)(vqshrn_n_u16(__s1_793, __p2_793)))); \
+  __ret_793; \
 })
 #else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
+#define vqshrn_high_n_u16(__p0_794, __p1_794, __p2_794) __extension__ ({ \
+  uint8x8_t __s0_794 = __p0_794; \
+  uint16x8_t __s1_794 = __p1_794; \
+  uint8x8_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_794; \
+  __ret_794 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_794), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_794, __p2_794)))); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_794; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
+#define vqshrn_high_n_s32(__p0_795, __p1_795, __p2_795) __extension__ ({ \
+  int16x4_t __s0_795 = __p0_795; \
+  int32x4_t __s1_795 = __p1_795; \
+  int16x8_t __ret_795; \
+  __ret_795 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_795), (int16x4_t)(vqshrn_n_s32(__s1_795, __p2_795)))); \
+  __ret_795; \
 })
 #else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
+#define vqshrn_high_n_s32(__p0_796, __p1_796, __p2_796) __extension__ ({ \
+  int16x4_t __s0_796 = __p0_796; \
+  int32x4_t __s1_796 = __p1_796; \
+  int16x4_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \
+  int32x4_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \
+  int16x8_t __ret_796; \
+  __ret_796 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_796), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_796, __p2_796)))); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
+#define vqshrn_high_n_s64(__p0_797, __p1_797, __p2_797) __extension__ ({ \
+  int32x2_t __s0_797 = __p0_797; \
+  int64x2_t __s1_797 = __p1_797; \
+  int32x4_t __ret_797; \
+  __ret_797 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_797), (int32x2_t)(vqshrn_n_s64(__s1_797, __p2_797)))); \
+  __ret_797; \
 })
 #else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
+#define vqshrn_high_n_s64(__p0_798, __p1_798, __p2_798) __extension__ ({ \
+  int32x2_t __s0_798 = __p0_798; \
+  int64x2_t __s1_798 = __p1_798; \
+  int32x2_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \
+  int64x2_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \
+  int32x4_t __ret_798; \
+  __ret_798 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_798), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_798, __p2_798)))); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \
+  __ret_798; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
+#define vqshrn_high_n_s16(__p0_799, __p1_799, __p2_799) __extension__ ({ \
+  int8x8_t __s0_799 = __p0_799; \
+  int16x8_t __s1_799 = __p1_799; \
+  int8x16_t __ret_799; \
+  __ret_799 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_799), (int8x8_t)(vqshrn_n_s16(__s1_799, __p2_799)))); \
+  __ret_799; \
 })
 #else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
+#define vqshrn_high_n_s16(__p0_800, __p1_800, __p2_800) __extension__ ({ \
+  int8x8_t __s0_800 = __p0_800; \
+  int16x8_t __s1_800 = __p1_800; \
+  int8x8_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_800; \
+  __ret_800 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_800), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_800, __p2_800)))); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_800; \
 })
 #endif
 
@@ -60689,65 +60877,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
+#define vqshrun_high_n_s32(__p0_801, __p1_801, __p2_801) __extension__ ({ \
+  int16x4_t __s0_801 = __p0_801; \
+  int32x4_t __s1_801 = __p1_801; \
+  int16x8_t __ret_801; \
+  __ret_801 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_801), (int16x4_t)(vqshrun_n_s32(__s1_801, __p2_801)))); \
+  __ret_801; \
 })
 #else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
+#define vqshrun_high_n_s32(__p0_802, __p1_802, __p2_802) __extension__ ({ \
+  int16x4_t __s0_802 = __p0_802; \
+  int32x4_t __s1_802 = __p1_802; \
+  int16x4_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \
+  int32x4_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \
+  int16x8_t __ret_802; \
+  __ret_802 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_802), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_802, __p2_802)))); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
+#define vqshrun_high_n_s64(__p0_803, __p1_803, __p2_803) __extension__ ({ \
+  int32x2_t __s0_803 = __p0_803; \
+  int64x2_t __s1_803 = __p1_803; \
+  int32x4_t __ret_803; \
+  __ret_803 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_803), (int32x2_t)(vqshrun_n_s64(__s1_803, __p2_803)))); \
+  __ret_803; \
 })
 #else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
+#define vqshrun_high_n_s64(__p0_804, __p1_804, __p2_804) __extension__ ({ \
+  int32x2_t __s0_804 = __p0_804; \
+  int64x2_t __s1_804 = __p1_804; \
+  int32x2_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \
+  int64x2_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 1, 0); \
+  int32x4_t __ret_804; \
+  __ret_804 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_804), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_804, __p2_804)))); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 3, 2, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
+#define vqshrun_high_n_s16(__p0_805, __p1_805, __p2_805) __extension__ ({ \
+  int8x8_t __s0_805 = __p0_805; \
+  int16x8_t __s1_805 = __p1_805; \
+  int8x16_t __ret_805; \
+  __ret_805 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_805), (int8x8_t)(vqshrun_n_s16(__s1_805, __p2_805)))); \
+  __ret_805; \
 })
 #else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
+#define vqshrun_high_n_s16(__p0_806, __p1_806, __p2_806) __extension__ ({ \
+  int8x8_t __s0_806 = __p0_806; \
+  int16x8_t __s1_806 = __p1_806; \
+  int8x8_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_806; \
+  __ret_806 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_806), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_806, __p2_806)))); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
@@ -62057,128 +62245,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
+#define vrshrn_high_n_u32(__p0_807, __p1_807, __p2_807) __extension__ ({ \
+  uint16x4_t __s0_807 = __p0_807; \
+  uint32x4_t __s1_807 = __p1_807; \
+  uint16x8_t __ret_807; \
+  __ret_807 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_807), (uint16x4_t)(vrshrn_n_u32(__s1_807, __p2_807)))); \
+  __ret_807; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
+#define vrshrn_high_n_u32(__p0_808, __p1_808, __p2_808) __extension__ ({ \
+  uint16x4_t __s0_808 = __p0_808; \
+  uint32x4_t __s1_808 = __p1_808; \
+  uint16x4_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 3, 2, 1, 0); \
+  uint32x4_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \
+  uint16x8_t __ret_808; \
+  __ret_808 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_808), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_808, __p2_808)))); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
+#define vrshrn_high_n_u64(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  uint32x2_t __s0_809 = __p0_809; \
+  uint64x2_t __s1_809 = __p1_809; \
+  uint32x4_t __ret_809; \
+  __ret_809 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_809), (uint32x2_t)(vrshrn_n_u64(__s1_809, __p2_809)))); \
+  __ret_809; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
+#define vrshrn_high_n_u64(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  uint32x2_t __s0_810 = __p0_810; \
+  uint64x2_t __s1_810 = __p1_810; \
+  uint32x2_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 1, 0); \
+  uint64x2_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 1, 0); \
+  uint32x4_t __ret_810; \
+  __ret_810 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_810), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_810, __p2_810)))); \
+  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 3, 2, 1, 0); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
+#define vrshrn_high_n_u16(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  uint8x8_t __s0_811 = __p0_811; \
+  uint16x8_t __s1_811 = __p1_811; \
+  uint8x16_t __ret_811; \
+  __ret_811 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_811), (uint8x8_t)(vrshrn_n_u16(__s1_811, __p2_811)))); \
+  __ret_811; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
+#define vrshrn_high_n_u16(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  uint8x8_t __s0_812 = __p0_812; \
+  uint16x8_t __s1_812 = __p1_812; \
+  uint8x8_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_812; \
+  __ret_812 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_812), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_812, __p2_812)))); \
+  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_812; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
+#define vrshrn_high_n_s32(__p0_813, __p1_813, __p2_813) __extension__ ({ \
+  int16x4_t __s0_813 = __p0_813; \
+  int32x4_t __s1_813 = __p1_813; \
+  int16x8_t __ret_813; \
+  __ret_813 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_813), (int16x4_t)(vrshrn_n_s32(__s1_813, __p2_813)))); \
+  __ret_813; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
+#define vrshrn_high_n_s32(__p0_814, __p1_814, __p2_814) __extension__ ({ \
+  int16x4_t __s0_814 = __p0_814; \
+  int32x4_t __s1_814 = __p1_814; \
+  int16x4_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \
+  int32x4_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 3, 2, 1, 0); \
+  int16x8_t __ret_814; \
+  __ret_814 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_814), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_814, __p2_814)))); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
+#define vrshrn_high_n_s64(__p0_815, __p1_815, __p2_815) __extension__ ({ \
+  int32x2_t __s0_815 = __p0_815; \
+  int64x2_t __s1_815 = __p1_815; \
+  int32x4_t __ret_815; \
+  __ret_815 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_815), (int32x2_t)(vrshrn_n_s64(__s1_815, __p2_815)))); \
+  __ret_815; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
+#define vrshrn_high_n_s64(__p0_816, __p1_816, __p2_816) __extension__ ({ \
+  int32x2_t __s0_816 = __p0_816; \
+  int64x2_t __s1_816 = __p1_816; \
+  int32x2_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \
+  int64x2_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \
+  int32x4_t __ret_816; \
+  __ret_816 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_816), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_816, __p2_816)))); \
+  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 3, 2, 1, 0); \
+  __ret_816; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
+#define vrshrn_high_n_s16(__p0_817, __p1_817, __p2_817) __extension__ ({ \
+  int8x8_t __s0_817 = __p0_817; \
+  int16x8_t __s1_817 = __p1_817; \
+  int8x16_t __ret_817; \
+  __ret_817 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_817), (int8x8_t)(vrshrn_n_s16(__s1_817, __p2_817)))); \
+  __ret_817; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
+#define vrshrn_high_n_s16(__p0_818, __p1_818, __p2_818) __extension__ ({ \
+  int8x8_t __s0_818 = __p0_818; \
+  int16x8_t __s1_818 = __p1_818; \
+  int8x8_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_818; \
+  __ret_818 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_818), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_818, __p2_818)))); \
+  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_818; \
 })
 #endif
 
@@ -62458,110 +62646,110 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
+#define vshll_high_n_u8(__p0_819, __p1_819) __extension__ ({ \
+  uint8x16_t __s0_819 = __p0_819; \
+  uint16x8_t __ret_819; \
+  __ret_819 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_819), __p1_819)); \
+  __ret_819; \
 })
 #else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
+#define vshll_high_n_u8(__p0_820, __p1_820) __extension__ ({ \
+  uint8x16_t __s0_820 = __p0_820; \
+  uint8x16_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_820; \
+  __ret_820 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_820), __p1_820)); \
+  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
+#define vshll_high_n_u32(__p0_821, __p1_821) __extension__ ({ \
+  uint32x4_t __s0_821 = __p0_821; \
+  uint64x2_t __ret_821; \
+  __ret_821 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_821), __p1_821)); \
+  __ret_821; \
 })
 #else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
+#define vshll_high_n_u32(__p0_822, __p1_822) __extension__ ({ \
+  uint32x4_t __s0_822 = __p0_822; \
+  uint32x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
+  uint64x2_t __ret_822; \
+  __ret_822 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_822), __p1_822)); \
+  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 1, 0); \
+  __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
+#define vshll_high_n_u16(__p0_823, __p1_823) __extension__ ({ \
+  uint16x8_t __s0_823 = __p0_823; \
+  uint32x4_t __ret_823; \
+  __ret_823 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_823), __p1_823)); \
+  __ret_823; \
 })
 #else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
+#define vshll_high_n_u16(__p0_824, __p1_824) __extension__ ({ \
+  uint16x8_t __s0_824 = __p0_824; \
+  uint16x8_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_824; \
+  __ret_824 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_824), __p1_824)); \
+  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
+  __ret_824; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
+#define vshll_high_n_s8(__p0_825, __p1_825) __extension__ ({ \
+  int8x16_t __s0_825 = __p0_825; \
+  int16x8_t __ret_825; \
+  __ret_825 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_825), __p1_825)); \
+  __ret_825; \
 })
 #else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
+#define vshll_high_n_s8(__p0_826, __p1_826) __extension__ ({ \
+  int8x16_t __s0_826 = __p0_826; \
+  int8x16_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_826; \
+  __ret_826 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_826), __p1_826)); \
+  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_826; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
+#define vshll_high_n_s32(__p0_827, __p1_827) __extension__ ({ \
+  int32x4_t __s0_827 = __p0_827; \
+  int64x2_t __ret_827; \
+  __ret_827 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_827), __p1_827)); \
+  __ret_827; \
 })
 #else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
+#define vshll_high_n_s32(__p0_828, __p1_828) __extension__ ({ \
+  int32x4_t __s0_828 = __p0_828; \
+  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
+  int64x2_t __ret_828; \
+  __ret_828 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_828), __p1_828)); \
+  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \
+  __ret_828; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
+#define vshll_high_n_s16(__p0_829, __p1_829) __extension__ ({ \
+  int16x8_t __s0_829 = __p0_829; \
+  int32x4_t __ret_829; \
+  __ret_829 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_829), __p1_829)); \
+  __ret_829; \
 })
 #else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
+#define vshll_high_n_s16(__p0_830, __p1_830) __extension__ ({ \
+  int16x8_t __s0_830 = __p0_830; \
+  int16x8_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_830; \
+  __ret_830 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_830), __p1_830)); \
+  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \
+  __ret_830; \
 })
 #endif
 
@@ -62578,128 +62766,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
-  uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
-  __ret_815; \
+#define vshrn_high_n_u32(__p0_831, __p1_831, __p2_831) __extension__ ({ \
+  uint16x4_t __s0_831 = __p0_831; \
+  uint32x4_t __s1_831 = __p1_831; \
+  uint16x8_t __ret_831; \
+  __ret_831 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_831), (uint16x4_t)(vshrn_n_u32(__s1_831, __p2_831)))); \
+  __ret_831; \
 })
 #else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
-  uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
-  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_816; \
+#define vshrn_high_n_u32(__p0_832, __p1_832, __p2_832) __extension__ ({ \
+  uint16x4_t __s0_832 = __p0_832; \
+  uint32x4_t __s1_832 = __p1_832; \
+  uint16x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
+  uint32x4_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 3, 2, 1, 0); \
+  uint16x8_t __ret_832; \
+  __ret_832 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_832), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_832, __p2_832)))); \
+  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
-  __ret_817; \
+#define vshrn_high_n_u64(__p0_833, __p1_833, __p2_833) __extension__ ({ \
+  uint32x2_t __s0_833 = __p0_833; \
+  uint64x2_t __s1_833 = __p1_833; \
+  uint32x4_t __ret_833; \
+  __ret_833 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_833), (uint32x2_t)(vshrn_n_u64(__s1_833, __p2_833)))); \
+  __ret_833; \
 })
 #else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
-  __ret_818; \
+#define vshrn_high_n_u64(__p0_834, __p1_834, __p2_834) __extension__ ({ \
+  uint32x2_t __s0_834 = __p0_834; \
+  uint64x2_t __s1_834 = __p1_834; \
+  uint32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
+  uint64x2_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 1, 0); \
+  uint32x4_t __ret_834; \
+  __ret_834 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_834), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_834, __p2_834)))); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 3, 2, 1, 0); \
+  __ret_834; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
-  __ret_819; \
+#define vshrn_high_n_u16(__p0_835, __p1_835, __p2_835) __extension__ ({ \
+  uint8x8_t __s0_835 = __p0_835; \
+  uint16x8_t __s1_835 = __p1_835; \
+  uint8x16_t __ret_835; \
+  __ret_835 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_835), (uint8x8_t)(vshrn_n_u16(__s1_835, __p2_835)))); \
+  __ret_835; \
 })
 #else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_820; \
+#define vshrn_high_n_u16(__p0_836, __p1_836, __p2_836) __extension__ ({ \
+  uint8x8_t __s0_836 = __p0_836; \
+  uint16x8_t __s1_836 = __p1_836; \
+  uint8x8_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_836;  __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_836; \
+  __ret_836 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_836), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_836, __p2_836)))); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_836; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
-  int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
-  __ret_821; \
+#define vshrn_high_n_s32(__p0_837, __p1_837, __p2_837) __extension__ ({ \
+  int16x4_t __s0_837 = __p0_837; \
+  int32x4_t __s1_837 = __p1_837; \
+  int16x8_t __ret_837; \
+  __ret_837 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_837), (int16x4_t)(vshrn_n_s32(__s1_837, __p2_837)))); \
+  __ret_837; \
 })
 #else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
-  int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
-  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_822; \
+#define vshrn_high_n_s32(__p0_838, __p1_838, __p2_838) __extension__ ({ \
+  int16x4_t __s0_838 = __p0_838; \
+  int32x4_t __s1_838 = __p1_838; \
+  int16x4_t __rev0_838;  __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 3, 2, 1, 0); \
+  int32x4_t __rev1_838;  __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 3, 2, 1, 0); \
+  int16x8_t __ret_838; \
+  __ret_838 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_838), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_838, __p2_838)))); \
+  __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
-  __ret_823; \
+#define vshrn_high_n_s64(__p0_839, __p1_839, __p2_839) __extension__ ({ \
+  int32x2_t __s0_839 = __p0_839; \
+  int64x2_t __s1_839 = __p1_839; \
+  int32x4_t __ret_839; \
+  __ret_839 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_839), (int32x2_t)(vshrn_n_s64(__s1_839, __p2_839)))); \
+  __ret_839; \
 })
 #else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
-  __ret_824; \
+#define vshrn_high_n_s64(__p0_840, __p1_840, __p2_840) __extension__ ({ \
+  int32x2_t __s0_840 = __p0_840; \
+  int64x2_t __s1_840 = __p1_840; \
+  int32x2_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \
+  int64x2_t __rev1_840;  __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 1, 0); \
+  int32x4_t __ret_840; \
+  __ret_840 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_840), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_840, __p2_840)))); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
-  __ret_825; \
+#define vshrn_high_n_s16(__p0_841, __p1_841, __p2_841) __extension__ ({ \
+  int8x8_t __s0_841 = __p0_841; \
+  int16x8_t __s1_841 = __p1_841; \
+  int8x16_t __ret_841; \
+  __ret_841 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_841), (int8x8_t)(vshrn_n_s16(__s1_841, __p2_841)))); \
+  __ret_841; \
 })
 #else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_826; \
+#define vshrn_high_n_s16(__p0_842, __p1_842, __p2_842) __extension__ ({ \
+  int8x8_t __s0_842 = __p0_842; \
+  int16x8_t __s1_842 = __p1_842; \
+  int8x8_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_842; \
+  __ret_842 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_842), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_842, __p2_842)))); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_842; \
 })
 #endif
 
@@ -64135,54 +64323,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
+#define vsudotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  int32x4_t __s0_843 = __p0_843; \
+  int8x16_t __s1_843 = __p1_843; \
+  uint8x16_t __s2_843 = __p2_843; \
+  int32x4_t __ret_843; \
+uint8x16_t __reint_843 = __s2_843; \
+  __ret_843 = vusdotq_s32(__s0_843, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843)), __s1_843); \
+  __ret_843; \
 })
 #else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
+#define vsudotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  int32x4_t __s0_844 = __p0_844; \
+  int8x16_t __s1_844 = __p1_844; \
+  uint8x16_t __s2_844 = __p2_844; \
+  int32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
+  int8x16_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_844; \
+uint8x16_t __reint_844 = __rev2_844; \
+  __ret_844 = __noswap_vusdotq_s32(__rev0_844, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844)), __rev1_844); \
+  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
+#define vsudot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  int32x2_t __s0_845 = __p0_845; \
+  int8x8_t __s1_845 = __p1_845; \
+  uint8x16_t __s2_845 = __p2_845; \
+  int32x2_t __ret_845; \
+uint8x16_t __reint_845 = __s2_845; \
+  __ret_845 = vusdot_s32(__s0_845, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845)), __s1_845); \
+  __ret_845; \
 })
 #else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
+#define vsudot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  int32x2_t __s0_846 = __p0_846; \
+  int8x8_t __s1_846 = __p1_846; \
+  uint8x16_t __s2_846 = __p2_846; \
+  int32x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
+  int8x8_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_846; \
+uint8x16_t __reint_846 = __rev2_846; \
+  __ret_846 = __noswap_vusdot_s32(__rev0_846, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846)), __rev1_846); \
+  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
+  __ret_846; \
 })
 #endif
 
@@ -65155,54 +65343,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
+#define vusdotq_laneq_s32(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
+  int32x4_t __s0_847 = __p0_847; \
+  uint8x16_t __s1_847 = __p1_847; \
+  int8x16_t __s2_847 = __p2_847; \
+  int32x4_t __ret_847; \
+int8x16_t __reint_847 = __s2_847; \
+  __ret_847 = vusdotq_s32(__s0_847, __s1_847, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_847, __p3_847))); \
+  __ret_847; \
 })
 #else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
+#define vusdotq_laneq_s32(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
+  int32x4_t __s0_848 = __p0_848; \
+  uint8x16_t __s1_848 = __p1_848; \
+  int8x16_t __s2_848 = __p2_848; \
+  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
+  uint8x16_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_848; \
+int8x16_t __reint_848 = __rev2_848; \
+  __ret_848 = __noswap_vusdotq_s32(__rev0_848, __rev1_848, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_848, __p3_848))); \
+  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
+  __ret_848; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
+#define vusdot_laneq_s32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
+  int32x2_t __s0_849 = __p0_849; \
+  uint8x8_t __s1_849 = __p1_849; \
+  int8x16_t __s2_849 = __p2_849; \
+  int32x2_t __ret_849; \
+int8x16_t __reint_849 = __s2_849; \
+  __ret_849 = vusdot_s32(__s0_849, __s1_849, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_849, __p3_849))); \
+  __ret_849; \
 })
 #else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
+#define vusdot_laneq_s32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
+  int32x2_t __s0_850 = __p0_850; \
+  uint8x8_t __s1_850 = __p1_850; \
+  int8x16_t __s2_850 = __p2_850; \
+  int32x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
+  uint8x8_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_850; \
+int8x16_t __reint_850 = __rev2_850; \
+  __ret_850 = __noswap_vusdot_s32(__rev0_850, __rev1_850, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_850, __p3_850))); \
+  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
+  __ret_850; \
 })
 #endif
 
@@ -67256,60 +67444,60 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
+#define vget_lane_f16(__p0_851, __p1_851) __extension__ ({ \
+  float16x4_t __s0_851 = __p0_851; \
+  float16_t __ret_851; \
+float16x4_t __reint_851 = __s0_851; \
+int16_t __reint1_851 = vget_lane_s16(*(int16x4_t *) &__reint_851, __p1_851); \
+  __ret_851 = *(float16_t *) &__reint1_851; \
+  __ret_851; \
 })
 #else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
+#define vget_lane_f16(__p0_852, __p1_852) __extension__ ({ \
+  float16x4_t __s0_852 = __p0_852; \
+  float16x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
+  float16_t __ret_852; \
+float16x4_t __reint_852 = __rev0_852; \
+int16_t __reint1_852 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_852, __p1_852); \
+  __ret_852 = *(float16_t *) &__reint1_852; \
+  __ret_852; \
 })
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
+#define __noswap_vget_lane_f16(__p0_853, __p1_853) __extension__ ({ \
+  float16x4_t __s0_853 = __p0_853; \
+  float16_t __ret_853; \
+float16x4_t __reint_853 = __s0_853; \
+int16_t __reint1_853 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_853, __p1_853); \
+  __ret_853 = *(float16_t *) &__reint1_853; \
+  __ret_853; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
+#define vgetq_lane_f16(__p0_854, __p1_854) __extension__ ({ \
+  float16x8_t __s0_854 = __p0_854; \
+  float16_t __ret_854; \
+float16x8_t __reint_854 = __s0_854; \
+int16_t __reint1_854 = vgetq_lane_s16(*(int16x8_t *) &__reint_854, __p1_854); \
+  __ret_854 = *(float16_t *) &__reint1_854; \
+  __ret_854; \
 })
 #else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
+#define vgetq_lane_f16(__p0_855, __p1_855) __extension__ ({ \
+  float16x8_t __s0_855 = __p0_855; \
+  float16x8_t __rev0_855;  __rev0_855 = __builtin_shufflevector(__s0_855, __s0_855, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_855; \
+float16x8_t __reint_855 = __rev0_855; \
+int16_t __reint1_855 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_855, __p1_855); \
+  __ret_855 = *(float16_t *) &__reint1_855; \
+  __ret_855; \
 })
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
+#define __noswap_vgetq_lane_f16(__p0_856, __p1_856) __extension__ ({ \
+  float16x8_t __s0_856 = __p0_856; \
+  float16_t __ret_856; \
+float16x8_t __reint_856 = __s0_856; \
+int16_t __reint1_856 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_856, __p1_856); \
+  __ret_856 = *(float16_t *) &__reint1_856; \
+  __ret_856; \
 })
 #endif
 
@@ -67452,98 +67640,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
+#define vmlal_lane_u32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \
+  uint64x2_t __s0_857 = __p0_857; \
+  uint32x2_t __s1_857 = __p1_857; \
+  uint32x2_t __s2_857 = __p2_857; \
+  uint64x2_t __ret_857; \
+  __ret_857 = __s0_857 + vmull_u32(__s1_857, splat_lane_u32(__s2_857, __p3_857)); \
+  __ret_857; \
 })
 #else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
+#define vmlal_lane_u32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \
+  uint64x2_t __s0_858 = __p0_858; \
+  uint32x2_t __s1_858 = __p1_858; \
+  uint32x2_t __s2_858 = __p2_858; \
+  uint64x2_t __rev0_858;  __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \
+  uint32x2_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \
+  uint32x2_t __rev2_858;  __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \
+  uint64x2_t __ret_858; \
+  __ret_858 = __rev0_858 + __noswap_vmull_u32(__rev1_858, __noswap_splat_lane_u32(__rev2_858, __p3_858)); \
+  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \
+  __ret_858; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
+#define vmlal_lane_u16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \
+  uint32x4_t __s0_859 = __p0_859; \
+  uint16x4_t __s1_859 = __p1_859; \
+  uint16x4_t __s2_859 = __p2_859; \
+  uint32x4_t __ret_859; \
+  __ret_859 = __s0_859 + vmull_u16(__s1_859, splat_lane_u16(__s2_859, __p3_859)); \
+  __ret_859; \
 })
 #else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
+#define vmlal_lane_u16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \
+  uint32x4_t __s0_860 = __p0_860; \
+  uint16x4_t __s1_860 = __p1_860; \
+  uint16x4_t __s2_860 = __p2_860; \
+  uint32x4_t __rev0_860;  __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \
+  uint16x4_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \
+  uint16x4_t __rev2_860;  __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \
+  uint32x4_t __ret_860; \
+  __ret_860 = __rev0_860 + __noswap_vmull_u16(__rev1_860, __noswap_splat_lane_u16(__rev2_860, __p3_860)); \
+  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \
+  __ret_860; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
+#define vmlal_lane_s32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
+  int64x2_t __s0_861 = __p0_861; \
+  int32x2_t __s1_861 = __p1_861; \
+  int32x2_t __s2_861 = __p2_861; \
+  int64x2_t __ret_861; \
+  __ret_861 = __s0_861 + vmull_s32(__s1_861, splat_lane_s32(__s2_861, __p3_861)); \
+  __ret_861; \
 })
 #else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
+#define vmlal_lane_s32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
+  int64x2_t __s0_862 = __p0_862; \
+  int32x2_t __s1_862 = __p1_862; \
+  int32x2_t __s2_862 = __p2_862; \
+  int64x2_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \
+  int32x2_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \
+  int32x2_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \
+  int64x2_t __ret_862; \
+  __ret_862 = __rev0_862 + __noswap_vmull_s32(__rev1_862, __noswap_splat_lane_s32(__rev2_862, __p3_862)); \
+  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \
+  __ret_862; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
+#define vmlal_lane_s16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
+  int32x4_t __s0_863 = __p0_863; \
+  int16x4_t __s1_863 = __p1_863; \
+  int16x4_t __s2_863 = __p2_863; \
+  int32x4_t __ret_863; \
+  __ret_863 = __s0_863 + vmull_s16(__s1_863, splat_lane_s16(__s2_863, __p3_863)); \
+  __ret_863; \
 })
 #else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
+#define vmlal_lane_s16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
+  int32x4_t __s0_864 = __p0_864; \
+  int16x4_t __s1_864 = __p1_864; \
+  int16x4_t __s2_864 = __p2_864; \
+  int32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
+  int16x4_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \
+  int16x4_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \
+  int32x4_t __ret_864; \
+  __ret_864 = __rev0_864 + __noswap_vmull_s16(__rev1_864, __noswap_splat_lane_s16(__rev2_864, __p3_864)); \
+  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
+  __ret_864; \
 })
 #endif
 
@@ -67774,98 +67962,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
+#define vmlsl_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
+  uint64x2_t __s0_865 = __p0_865; \
+  uint32x2_t __s1_865 = __p1_865; \
+  uint32x2_t __s2_865 = __p2_865; \
+  uint64x2_t __ret_865; \
+  __ret_865 = __s0_865 - vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \
+  __ret_865; \
 })
 #else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
+#define vmlsl_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
+  uint64x2_t __s0_866 = __p0_866; \
+  uint32x2_t __s1_866 = __p1_866; \
+  uint32x2_t __s2_866 = __p2_866; \
+  uint64x2_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \
+  uint32x2_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \
+  uint32x2_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \
+  uint64x2_t __ret_866; \
+  __ret_866 = __rev0_866 - __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \
+  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \
+  __ret_866; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
+#define vmlsl_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
+  uint32x4_t __s0_867 = __p0_867; \
+  uint16x4_t __s1_867 = __p1_867; \
+  uint16x4_t __s2_867 = __p2_867; \
+  uint32x4_t __ret_867; \
+  __ret_867 = __s0_867 - vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \
+  __ret_867; \
 })
 #else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
+#define vmlsl_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
+  uint32x4_t __s0_868 = __p0_868; \
+  uint16x4_t __s1_868 = __p1_868; \
+  uint16x4_t __s2_868 = __p2_868; \
+  uint32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
+  uint16x4_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \
+  uint16x4_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \
+  uint32x4_t __ret_868; \
+  __ret_868 = __rev0_868 - __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \
+  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
+  __ret_868; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
+#define vmlsl_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
+  int64x2_t __s0_869 = __p0_869; \
+  int32x2_t __s1_869 = __p1_869; \
+  int32x2_t __s2_869 = __p2_869; \
+  int64x2_t __ret_869; \
+  __ret_869 = __s0_869 - vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \
+  __ret_869; \
 })
 #else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
+#define vmlsl_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
+  int64x2_t __s0_870 = __p0_870; \
+  int32x2_t __s1_870 = __p1_870; \
+  int32x2_t __s2_870 = __p2_870; \
+  int64x2_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \
+  int32x2_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \
+  int32x2_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \
+  int64x2_t __ret_870; \
+  __ret_870 = __rev0_870 - __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \
+  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \
+  __ret_870; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
+#define vmlsl_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
+  int32x4_t __s0_871 = __p0_871; \
+  int16x4_t __s1_871 = __p1_871; \
+  int16x4_t __s2_871 = __p2_871; \
+  int32x4_t __ret_871; \
+  __ret_871 = __s0_871 - vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \
+  __ret_871; \
 })
 #else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
+#define vmlsl_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
+  int32x4_t __s0_872 = __p0_872; \
+  int16x4_t __s1_872 = __p1_872; \
+  int16x4_t __s2_872 = __p2_872; \
+  int32x4_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \
+  int16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
+  int16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
+  int32x4_t __ret_872; \
+  __ret_872 = __rev0_872 - __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \
+  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \
+  __ret_872; \
 })
 #endif
 
@@ -67958,151 +68146,151 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
+#define vset_lane_f16(__p0_873, __p1_873, __p2_873) __extension__ ({ \
+  float16_t __s0_873 = __p0_873; \
+  float16x4_t __s1_873 = __p1_873; \
+  float16x4_t __ret_873; \
+float16_t __reint_873 = __s0_873; \
+float16x4_t __reint1_873 = __s1_873; \
+int16x4_t __reint2_873 = vset_lane_s16(*(int16_t *) &__reint_873, *(int16x4_t *) &__reint1_873, __p2_873); \
+  __ret_873 = *(float16x4_t *) &__reint2_873; \
+  __ret_873; \
 })
 #else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
+#define vset_lane_f16(__p0_874, __p1_874, __p2_874) __extension__ ({ \
+  float16_t __s0_874 = __p0_874; \
+  float16x4_t __s1_874 = __p1_874; \
+  float16x4_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 3, 2, 1, 0); \
+  float16x4_t __ret_874; \
+float16_t __reint_874 = __s0_874; \
+float16x4_t __reint1_874 = __rev1_874; \
+int16x4_t __reint2_874 = __noswap_vset_lane_s16(*(int16_t *) &__reint_874, *(int16x4_t *) &__reint1_874, __p2_874); \
+  __ret_874 = *(float16x4_t *) &__reint2_874; \
+  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
+  __ret_874; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
+#define vsetq_lane_f16(__p0_875, __p1_875, __p2_875) __extension__ ({ \
+  float16_t __s0_875 = __p0_875; \
+  float16x8_t __s1_875 = __p1_875; \
+  float16x8_t __ret_875; \
+float16_t __reint_875 = __s0_875; \
+float16x8_t __reint1_875 = __s1_875; \
+int16x8_t __reint2_875 = vsetq_lane_s16(*(int16_t *) &__reint_875, *(int16x8_t *) &__reint1_875, __p2_875); \
+  __ret_875 = *(float16x8_t *) &__reint2_875; \
+  __ret_875; \
 })
 #else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
+#define vsetq_lane_f16(__p0_876, __p1_876, __p2_876) __extension__ ({ \
+  float16_t __s0_876 = __p0_876; \
+  float16x8_t __s1_876 = __p1_876; \
+  float16x8_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_876; \
+float16_t __reint_876 = __s0_876; \
+float16x8_t __reint1_876 = __rev1_876; \
+int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(int16x8_t *) &__reint1_876, __p2_876); \
+  __ret_876 = *(float16x8_t *) &__reint2_876; \
+  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_876; \
 })
 #endif
 
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
+#define vbfmlalbq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
+  float32x4_t __s0_877 = __p0_877; \
+  bfloat16x8_t __s1_877 = __p1_877; \
+  bfloat16x4_t __s2_877 = __p2_877; \
+  float32x4_t __ret_877; \
+  __ret_877 = vbfmlalbq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \
+  __ret_877; \
 })
 #else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
+#define vbfmlalbq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
+  float32x4_t __s0_878 = __p0_878; \
+  bfloat16x8_t __s1_878 = __p1_878; \
+  bfloat16x4_t __s2_878 = __p2_878; \
+  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \
+  float32x4_t __ret_878; \
+  __ret_878 = __noswap_vbfmlalbq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \
+  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
+  __ret_878; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
+#define vbfmlalbq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
+  float32x4_t __s0_879 = __p0_879; \
+  bfloat16x8_t __s1_879 = __p1_879; \
+  bfloat16x8_t __s2_879 = __p2_879; \
+  float32x4_t __ret_879; \
+  __ret_879 = vbfmlalbq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \
+  __ret_879; \
 })
 #else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
+#define vbfmlalbq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
+  float32x4_t __s0_880 = __p0_880; \
+  bfloat16x8_t __s1_880 = __p1_880; \
+  bfloat16x8_t __s2_880 = __p2_880; \
+  float32x4_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_880; \
+  __ret_880 = __noswap_vbfmlalbq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \
+  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \
+  __ret_880; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
+#define vbfmlaltq_lane_f32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+  float32x4_t __s0_881 = __p0_881; \
+  bfloat16x8_t __s1_881 = __p1_881; \
+  bfloat16x4_t __s2_881 = __p2_881; \
+  float32x4_t __ret_881; \
+  __ret_881 = vbfmlaltq_f32(__s0_881, __s1_881, (bfloat16x8_t) {vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881)}); \
+  __ret_881; \
 })
 #else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
+#define vbfmlaltq_lane_f32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+  float32x4_t __s0_882 = __p0_882; \
+  bfloat16x8_t __s1_882 = __p1_882; \
+  bfloat16x4_t __s2_882 = __p2_882; \
+  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \
+  float32x4_t __ret_882; \
+  __ret_882 = __noswap_vbfmlaltq_f32(__rev0_882, __rev1_882, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882)}); \
+  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
+  __ret_882; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
+#define vbfmlaltq_laneq_f32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+  float32x4_t __s0_883 = __p0_883; \
+  bfloat16x8_t __s1_883 = __p1_883; \
+  bfloat16x8_t __s2_883 = __p2_883; \
+  float32x4_t __ret_883; \
+  __ret_883 = vbfmlaltq_f32(__s0_883, __s1_883, (bfloat16x8_t) {vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883)}); \
+  __ret_883; \
 })
 #else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
+#define vbfmlaltq_laneq_f32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+  float32x4_t __s0_884 = __p0_884; \
+  bfloat16x8_t __s1_884 = __p1_884; \
+  bfloat16x8_t __s2_884 = __p2_884; \
+  float32x4_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_884; \
+  __ret_884 = __noswap_vbfmlaltq_f32(__rev0_884, __rev1_884, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884)}); \
+  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 3, 2, 1, 0); \
+  __ret_884; \
 })
 #endif
 
@@ -68141,208 +68329,16 @@
 #endif
 #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
-  float32x4_t __s0_881 = __p0_881; \
-  float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
-  __ret_881; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
-  float32x4_t __s0_882 = __p0_882; \
-  float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
-  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
-  float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
-  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
-  __ret_882; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
-  float32x2_t __s0_883 = __p0_883; \
-  float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
-  __ret_883; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
-  float32x2_t __s0_884 = __p0_884; \
-  float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
-  float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
-  float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
-  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
-  __ret_884; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
   float32x4_t __s0_885 = __p0_885; \
   float16x8_t __s1_885 = __p1_885; \
   float16x4_t __s2_885 = __p2_885; \
   float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
+  __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
   __ret_885; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
   float32x4_t __s0_886 = __p0_886; \
   float16x8_t __s1_886 = __p1_886; \
   float16x4_t __s2_886 = __p2_886; \
@@ -68350,23 +68346,23 @@
   float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
   float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
+  __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
   __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
   __ret_886; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
   float32x2_t __s0_887 = __p0_887; \
   float16x4_t __s1_887 = __p1_887; \
   float16x4_t __s2_887 = __p2_887; \
   float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
+  __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
   __ret_887; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
   float32x2_t __s0_888 = __p0_888; \
   float16x4_t __s1_888 = __p1_888; \
   float16x4_t __s2_888 = __p2_888; \
@@ -68374,23 +68370,23 @@
   float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
   float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
   float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
+  __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
   __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
   __ret_888; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
   float32x4_t __s0_889 = __p0_889; \
   float16x8_t __s1_889 = __p1_889; \
   float16x4_t __s2_889 = __p2_889; \
   float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
+  __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
   __ret_889; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
   float32x4_t __s0_890 = __p0_890; \
   float16x8_t __s1_890 = __p1_890; \
   float16x4_t __s2_890 = __p2_890; \
@@ -68398,23 +68394,23 @@
   float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
   float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
+  __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
   __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
   __ret_890; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
   float32x2_t __s0_891 = __p0_891; \
   float16x4_t __s1_891 = __p1_891; \
   float16x4_t __s2_891 = __p2_891; \
   float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
+  __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
   __ret_891; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
   float32x2_t __s0_892 = __p0_892; \
   float16x4_t __s1_892 = __p1_892; \
   float16x4_t __s2_892 = __p2_892; \
@@ -68422,23 +68418,23 @@
   float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
   float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
   float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
+  __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
   __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
   __ret_892; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
   float32x4_t __s0_893 = __p0_893; \
   float16x8_t __s1_893 = __p1_893; \
   float16x8_t __s2_893 = __p2_893; \
   float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
+  __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
   __ret_893; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
   float32x4_t __s0_894 = __p0_894; \
   float16x8_t __s1_894 = __p1_894; \
   float16x8_t __s2_894 = __p2_894; \
@@ -68446,23 +68442,23 @@
   float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
+  __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
   __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
   __ret_894; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
   float32x2_t __s0_895 = __p0_895; \
   float16x4_t __s1_895 = __p1_895; \
   float16x8_t __s2_895 = __p2_895; \
   float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
+  __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
   __ret_895; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
   float32x2_t __s0_896 = __p0_896; \
   float16x4_t __s1_896 = __p1_896; \
   float16x8_t __s2_896 = __p2_896; \
@@ -68470,23 +68466,23 @@
   float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
   float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
+  __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
   __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
   __ret_896; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
   float32x4_t __s0_897 = __p0_897; \
   float16x8_t __s1_897 = __p1_897; \
   float16x8_t __s2_897 = __p2_897; \
   float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
+  __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
   __ret_897; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
   float32x4_t __s0_898 = __p0_898; \
   float16x8_t __s1_898 = __p1_898; \
   float16x8_t __s2_898 = __p2_898; \
@@ -68494,23 +68490,23 @@
   float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
+  __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
   __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
   __ret_898; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
   float32x2_t __s0_899 = __p0_899; \
   float16x4_t __s1_899 = __p1_899; \
   float16x8_t __s2_899 = __p2_899; \
   float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
+  __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
   __ret_899; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
   float32x2_t __s0_900 = __p0_900; \
   float16x4_t __s1_900 = __p1_900; \
   float16x8_t __s2_900 = __p2_900; \
@@ -68518,292 +68514,294 @@
   float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
   float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
+  __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
   __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
   __ret_900; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
+#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \
+  float32x4_t __s0_901 = __p0_901; \
+  float16x8_t __s1_901 = __p1_901; \
+  float16x4_t __s2_901 = __p2_901; \
+  float32x4_t __ret_901; \
+  __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \
   __ret_901; \
 })
 #else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
+#define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \
+  float32x4_t __s0_902 = __p0_902; \
+  float16x8_t __s1_902 = __p1_902; \
+  float16x4_t __s2_902 = __p2_902; \
+  float32x4_t __rev0_902;  __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \
+  float16x8_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_902;  __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \
+  float32x4_t __ret_902; \
+  __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \
+  __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \
   __ret_902; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
+#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \
+  float32x2_t __s0_903 = __p0_903; \
+  float16x4_t __s1_903 = __p1_903; \
+  float16x4_t __s2_903 = __p2_903; \
+  float32x2_t __ret_903; \
+  __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \
   __ret_903; \
 })
 #else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
+#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \
+  float32x2_t __s0_904 = __p0_904; \
+  float16x4_t __s1_904 = __p1_904; \
+  float16x4_t __s2_904 = __p2_904; \
+  float32x2_t __rev0_904;  __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \
+  float16x4_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \
+  float16x4_t __rev2_904;  __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \
+  float32x2_t __ret_904; \
+  __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \
+  __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \
   __ret_904; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
+#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
+  float32x4_t __s0_905 = __p0_905; \
+  float16x8_t __s1_905 = __p1_905; \
+  float16x4_t __s2_905 = __p2_905; \
+  float32x4_t __ret_905; \
+  __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \
   __ret_905; \
 })
 #else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
+#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
+  float32x4_t __s0_906 = __p0_906; \
+  float16x8_t __s1_906 = __p1_906; \
+  float16x4_t __s2_906 = __p2_906; \
+  float32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
+  float16x8_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \
+  float32x4_t __ret_906; \
+  __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \
   __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
   __ret_906; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
+#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
+  float32x2_t __s0_907 = __p0_907; \
+  float16x4_t __s1_907 = __p1_907; \
+  float16x4_t __s2_907 = __p2_907; \
+  float32x2_t __ret_907; \
+  __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \
   __ret_907; \
 })
 #else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
+#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
+  float32x2_t __s0_908 = __p0_908; \
+  float16x4_t __s1_908 = __p1_908; \
+  float16x4_t __s2_908 = __p2_908; \
+  float32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
+  float16x4_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \
+  float16x4_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \
+  float32x2_t __ret_908; \
+  __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \
   __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
   __ret_908; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
+#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
+  float32x4_t __s0_909 = __p0_909; \
+  float16x8_t __s1_909 = __p1_909; \
+  float16x8_t __s2_909 = __p2_909; \
+  float32x4_t __ret_909; \
+  __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \
   __ret_909; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
+#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
+  float32x4_t __s0_910 = __p0_910; \
+  float16x8_t __s1_910 = __p1_910; \
+  float16x8_t __s2_910 = __p2_910; \
+  float32x4_t __rev0_910;  __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \
+  float16x8_t __rev1_910;  __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_910; \
+  __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \
+  __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \
   __ret_910; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
+#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
+  float32x2_t __s0_911 = __p0_911; \
+  float16x4_t __s1_911 = __p1_911; \
+  float16x8_t __s2_911 = __p2_911; \
+  float32x2_t __ret_911; \
+  __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \
   __ret_911; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
+#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
+  float32x2_t __s0_912 = __p0_912; \
+  float16x4_t __s1_912 = __p1_912; \
+  float16x8_t __s2_912 = __p2_912; \
+  float32x2_t __rev0_912;  __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \
+  float16x4_t __rev1_912;  __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \
+  float16x8_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_912; \
+  __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \
+  __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \
   __ret_912; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
+#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
+  float32x4_t __s0_913 = __p0_913; \
+  float16x8_t __s1_913 = __p1_913; \
+  float16x8_t __s2_913 = __p2_913; \
+  float32x4_t __ret_913; \
+  __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \
   __ret_913; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
+#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
+  float32x4_t __s0_914 = __p0_914; \
+  float16x8_t __s1_914 = __p1_914; \
+  float16x8_t __s2_914 = __p2_914; \
+  float32x4_t __rev0_914;  __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \
+  float16x8_t __rev1_914;  __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_914; \
+  __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \
+  __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \
   __ret_914; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
+#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
+  float32x2_t __s0_915 = __p0_915; \
+  float16x4_t __s1_915 = __p1_915; \
+  float16x8_t __s2_915 = __p2_915; \
+  float32x2_t __ret_915; \
+  __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \
   __ret_915; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
+#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
+  float32x2_t __s0_916 = __p0_916; \
+  float16x4_t __s1_916 = __p1_916; \
+  float16x8_t __s2_916 = __p2_916; \
+  float32x2_t __rev0_916;  __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \
+  float16x4_t __rev1_916;  __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \
+  float16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_916; \
+  __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \
+  __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \
   __ret_916; \
 })
 #endif
 
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
+#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \
+  float16_t __s0_917 = __p0_917; \
+  float16x4_t __s1_917 = __p1_917; \
+  float16_t __ret_917; \
+  __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \
   __ret_917; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
+#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \
+  float16_t __s0_918 = __p0_918; \
+  float16x4_t __s1_918 = __p1_918; \
+  float16x4_t __rev1_918;  __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \
+  float16_t __ret_918; \
+  __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \
   __ret_918; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
+#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \
+  float16_t __s0_919 = __p0_919; \
+  float16x8_t __s1_919 = __p1_919; \
+  float16_t __ret_919; \
+  __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \
   __ret_919; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
+#define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \
+  float16_t __s0_920 = __p0_920; \
+  float16x8_t __s1_920 = __p1_920; \
+  float16x8_t __rev1_920;  __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_920; \
+  __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \
   __ret_920; \
 })
 #endif
 
+#endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
+#define vsudotq_lane_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
+  int32x4_t __s0_921 = __p0_921; \
+  int8x16_t __s1_921 = __p1_921; \
+  uint8x8_t __s2_921 = __p2_921; \
+  int32x4_t __ret_921; \
+uint8x8_t __reint_921 = __s2_921; \
+  __ret_921 = vusdotq_s32(__s0_921, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_921, __p3_921)), __s1_921); \
   __ret_921; \
 })
 #else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
+#define vsudotq_lane_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
+  int32x4_t __s0_922 = __p0_922; \
+  int8x16_t __s1_922 = __p1_922; \
+  uint8x8_t __s2_922 = __p2_922; \
+  int32x4_t __rev0_922;  __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 3, 2, 1, 0); \
+  int8x16_t __rev1_922;  __rev1_922 = __builtin_shufflevector(__s1_922, __s1_922, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_922; \
+uint8x8_t __reint_922 = __rev2_922; \
+  __ret_922 = __noswap_vusdotq_s32(__rev0_922, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_922, __p3_922)), __rev1_922); \
+  __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 3, 2, 1, 0); \
   __ret_922; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
+#define vsudot_lane_s32(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
+  int32x2_t __s0_923 = __p0_923; \
+  int8x8_t __s1_923 = __p1_923; \
+  uint8x8_t __s2_923 = __p2_923; \
+  int32x2_t __ret_923; \
+uint8x8_t __reint_923 = __s2_923; \
+  __ret_923 = vusdot_s32(__s0_923, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_923, __p3_923)), __s1_923); \
   __ret_923; \
 })
 #else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
+#define vsudot_lane_s32(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
+  int32x2_t __s0_924 = __p0_924; \
+  int8x8_t __s1_924 = __p1_924; \
+  uint8x8_t __s2_924 = __p2_924; \
+  int32x2_t __rev0_924;  __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \
+  int8x8_t __rev1_924;  __rev1_924 = __builtin_shufflevector(__s1_924, __s1_924, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_924; \
+uint8x8_t __reint_924 = __rev2_924; \
+  __ret_924 = __noswap_vusdot_s32(__rev0_924, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_924, __p3_924)), __rev1_924); \
+  __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \
   __ret_924; \
 })
 #endif
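
Note on the arm_neon.h hunks above: the big-endian (#else) variants follow the header's standard pattern of reversing each vector operand with __builtin_shufflevector, calling the __noswap_ little-endian implementation, and reversing the result back. The vsudot_lane macros also show how the signed-by-unsigned dot product is expressed in terms of vusdot with the multiplicand operands swapped and the splatted lane cast to the unsigned type. A minimal usage sketch of one of the renumbered intrinsics, assuming an AArch64 target with FP16 arithmetic (e.g. -march=armv8.2-a+fp16); the values are illustrative only:

    #include <arm_neon.h>

    /* Multiply a scalar half-float by lane 2 of a float16x4_t.  On
       little-endian this expands to s * vget_lane_f16(v, 2); on
       big-endian the macro reverses v first, as in the hunk above. */
    float16_t scale_by_lane2(float16_t s, float16x4_t v) {
      return vmulh_lane_f16(s, v, 2);
    }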
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_sve.h b/darwin-x86/lib64/clang/14.0.6/include/arm_sve.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/arm_sve.h
rename to darwin-x86/lib64/clang/14.0.6/include/arm_sve.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/armintr.h b/darwin-x86/lib64/clang/14.0.6/include/armintr.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/armintr.h
rename to darwin-x86/lib64/clang/14.0.6/include/armintr.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx2intrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx2intrin.h
index 5064c87..e33514a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx2intrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/avx2intrin.h
@@ -26,19 +26,19 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi8(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi16(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi32(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+    return (__m256i)__builtin_elementwise_abs((__v8si)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -253,73 +253,73 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS256
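
The avx2intrin.h hunks replace the target-specific __builtin_ia32_pmax*/pmin* builtins with clang's generic __builtin_elementwise_max/min; the signedness that used to be encoded in the builtin name now comes from the vector element type (__v32qs vs. __v32qu, etc.), which is why the casts change alongside the builtin. A small sketch of why the element type matters, assuming an AVX2 target (-mavx2); the constants are illustrative:

    #include <immintrin.h>

    /* 0x80 is -128 as a signed byte but 128 as an unsigned byte, so
       the signed and unsigned byte-max intrinsics disagree on it. */
    __m256i max_signedness_demo(void) {
      __m256i a = _mm256_set1_epi8((char)0x80);
      __m256i b = _mm256_set1_epi8(1);
      __m256i smax = _mm256_max_epi8(a, b); /* 0x01 in every byte */
      __m256i umax = _mm256_max_epu8(a, b); /* 0x80 in every byte */
      return _mm256_xor_si256(smax, umax);  /* 0x81 in every byte */
    }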
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512bf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512bf16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512bitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
index 6aee8ae..522ef10 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
@@ -485,7 +485,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi8 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +507,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi16 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +751,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +773,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +796,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +818,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +840,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +862,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +884,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +906,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
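
avx512bwintrin.h gets the same rewrite, plus the pabs* builtins moving to __builtin_elementwise_abs. Clang's language-extension documentation specifies that the elementwise abs of the most negative integer stays the most negative integer, matching the wrap-around behaviour of the x86 pabs instructions, so the substitution is intended to be behaviour-preserving. An illustrative sketch (-mavx512bw assumed):

    #include <immintrin.h>

    /* abs wraps at the type minimum: |INT8_MIN| has no positive
       8-bit representation, so every byte stays 0x80. */
    __m512i abs_wrap_demo(void) {
      return _mm512_abs_epi8(_mm512_set1_epi8(-128));
    }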
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512cdintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512cdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512cdintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512cdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512dqintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512dqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512dqintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512dqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512erintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512erintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512erintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512fintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512fintrin.h
index df29864..50e0e28 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/avx512fintrin.h
@@ -26,6 +26,10 @@
 typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
 typedef unsigned int __v16su __attribute__((__vector_size__(64)));
 
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
 typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
 typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
 typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
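
The new __v64qs typedef exists because __v64qi is built on plain char, whose signedness is implementation-defined; the generic elementwise builtins infer signed vs. unsigned comparison from the element type, so the signed-byte intrinsics need an explicitly signed vector. A sketch of the distinction (the _demo names are hypothetical):

    /* Plain char may be unsigned on some targets, so a char vector
       cannot be relied on for signed byte comparisons; 'signed char'
       pins the semantics. */
    typedef char        v64qi_demo __attribute__((__vector_size__(64)));
    typedef signed char v64qs_demo __attribute__((__vector_size__(64)));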
@@ -1086,7 +1090,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_max_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1112,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1134,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1156,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1321,7 +1325,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_min_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1347,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1369,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1391,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1846,7 +1850,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi64(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+  return (__m512i)__builtin_elementwise_abs((__v8di)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1872,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi32(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+  return (__m512i)__builtin_elementwise_abs((__v16si) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9320,11 +9324,11 @@
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
@@ -9342,13 +9346,13 @@
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 // -0.0 is used to ignore the start value since it is the neutral value of
@@ -9386,12 +9390,12 @@
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
@@ -9409,13 +9413,13 @@
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9446,89 @@
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ double __DEFAULT_FN_ATTRS512
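
The reduction hunks likewise swap the __builtin_ia32_reduce_* builtins for the generic __builtin_reduce_and/or/max/min, again letting the vector element type select signed vs. unsigned. The masked variants keep their existing trick: masked-off lanes are first overwritten with the operation's identity element (~0 for AND, 0 for OR, the type minimum/maximum for max/min) so they cannot influence the result. Usage sketch (-mavx512f assumed):

    #include <immintrin.h>

    /* Horizontal AND across all eight 64-bit lanes... */
    long long and_all(__m512i v) {
      return _mm512_reduce_and_epi64(v);
    }

    /* ...and the masked form, where lanes cleared in m are replaced
       by ~0 (the AND identity) before reducing. */
    long long and_masked(__mmask8 m, __m512i v) {
      return _mm512_mask_reduce_and_epi64(m, v);
    }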
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512fp16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512fp16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512ifmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512ifmaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512ifmavlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512ifmavlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512pfintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512pfintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512pfintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512pfintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vbmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vbmiintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vbmivlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlbf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlbf16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlbitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlbwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlbwintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlcdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlcdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vldqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlfp16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlfp16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
index 0519dba..178c9db 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
@@ -2988,7 +2988,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3007,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3054,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3073,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3120,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3139,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3186,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3205,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3252,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3271,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlvnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlvnniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vlvp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vnniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vpopcntdqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avxintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avxintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avxvnniintrin.h b/darwin-x86/lib64/clang/14.0.6/include/avxvnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/avxvnniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bits/stdatomic.h b/darwin-x86/lib64/clang/14.0.6/include/bits/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/bits/stdatomic.h
rename to darwin-x86/lib64/clang/14.0.6/include/bits/stdatomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bmi2intrin.h b/darwin-x86/lib64/clang/14.0.6/include/bmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/bmi2intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h b/darwin-x86/lib64/clang/14.0.6/include/bmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/bmiintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/bmiintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/builtins.h b/darwin-x86/lib64/clang/14.0.6/include/builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/builtins.h
rename to darwin-x86/lib64/clang/14.0.6/include/builtins.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cet.h b/darwin-x86/lib64/clang/14.0.6/include/cet.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cet.h
rename to darwin-x86/lib64/clang/14.0.6/include/cet.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h b/darwin-x86/lib64/clang/14.0.6/include/cetintrin.h
similarity index 91%
rename from darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/cetintrin.h
index 4290e9d..019cab0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/cetintrin.h
@@ -42,10 +42,20 @@
   return __builtin_ia32_rdsspd(__a);
 }
 
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32() {
+  unsigned int t;
+  return __builtin_ia32_rdsspd(t);
+}
+
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
   return __builtin_ia32_rdsspq(__a);
 }
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64() {
+  unsigned long long t;
+  return __builtin_ia32_rdsspq(t);
+}
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
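
The new zero-argument _rdsspd_i32/_rdsspq_i64 read the current shadow-stack pointer without requiring a seed value; rdssp overwrites its destination when CET shadow stacks are active, so the deliberately uninitialized local is never observed in that case. The existing one-argument forms exist because rdssp is a NOP on CPUs without CET, leaving the input value intact, which permits a sentinel-based feature probe. A hedged sketch of that probe, assuming x86-64 and -mshstk:

    #include <immintrin.h>

    /* If shadow stacks are inactive, rdssp leaves the seed value
       untouched; 1 can never be a real shadow-stack pointer. */
    int shadow_stack_active(void) {
      return _rdsspq(1) != 1;
    }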
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cldemoteintrin.h b/darwin-x86/lib64/clang/14.0.6/include/cldemoteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cldemoteintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/cldemoteintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clflushoptintrin.h b/darwin-x86/lib64/clang/14.0.6/include/clflushoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clflushoptintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/clflushoptintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clwbintrin.h b/darwin-x86/lib64/clang/14.0.6/include/clwbintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clwbintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/clwbintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/clzerointrin.h b/darwin-x86/lib64/clang/14.0.6/include/clzerointrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/clzerointrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h b/darwin-x86/lib64/clang/14.0.6/include/cpuid.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/cpuid.h
rename to darwin-x86/lib64/clang/14.0.6/include/cpuid.h
index 6df1b4a..5d262a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/cpuid.h
@@ -200,7 +200,7 @@
 #define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI       0x00000008
+#define bit_AVXVNNI       0x00000010
 #define bit_AVX512BF16    0x00000020
 #define bit_HRESET        0x00400000
 
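
The cpuid.h change is a bug fix: AVX-VNNI is reported in CPUID leaf 7, sub-leaf 1, EAX bit 4 (0x10), not bit 3 (0x08) as the old constant claimed. A detection sketch using the header's own helper:

    #include <cpuid.h>

    /* AVX-VNNI: CPUID.(EAX=7,ECX=1):EAX[4]. */
    int has_avxvnni(void) {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
        return 0;
      return (eax & bit_AVXVNNI) != 0;
    }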
diff --git a/darwin-x86/lib64/clang/14.0.2/include/crc32intrin.h b/darwin-x86/lib64/clang/14.0.6/include/crc32intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/crc32intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/crc32intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm b/darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/algorithm
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm
rename to darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/algorithm
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex b/darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex
rename to darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/complex
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/new b/darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/cuda_wrappers/new
rename to darwin-x86/lib64/clang/14.0.6/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/emmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/emmintrin.h
index 6e9c303..4618b80 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/emmintrin.h
@@ -2375,7 +2375,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2395,7 +2395,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu8(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2415,7 +2415,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2435,7 +2435,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu8(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Multiplies the corresponding elements of two signed [8 x i16]
diff --git a/darwin-x86/lib64/clang/14.0.2/include/enqcmdintrin.h b/darwin-x86/lib64/clang/14.0.6/include/enqcmdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/enqcmdintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h b/darwin-x86/lib64/clang/14.0.6/include/f16cintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/f16cintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/f16cintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/float.h b/darwin-x86/lib64/clang/14.0.6/include/float.h
similarity index 83%
rename from darwin-x86/lib64/clang/14.0.2/include/float.h
rename to darwin-x86/lib64/clang/14.0.6/include/float.h
index ed610b2..c6a6cc0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/float.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/float.h
@@ -14,10 +14,11 @@
  * additional definitions provided for Windows.
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  *
- * Also fall back on Darwin to allow additional definitions and
+ * Also fall back on Darwin and AIX to allow additional definitions and
  * implementation-defined values.
  */
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) ||        \
+     defined(_AIX)) &&                                                         \
     __STDC_HOSTED__ && __has_include_next(<float.h>)
 
 /* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@@ -37,7 +38,9 @@
 #  undef FLT_MANT_DIG
 #  undef DBL_MANT_DIG
 #  undef LDBL_MANT_DIG
-#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) ||              \
+      __cplusplus >= 201103L ||                                                \
+      (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef DECIMAL_DIG
 #  endif
 #  undef FLT_DIG
@@ -64,7 +67,9 @@
 #  undef FLT_MIN
 #  undef DBL_MIN
 #  undef LDBL_MIN
-#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) ||              \
+      __cplusplus >= 201703L ||                                                \
+      (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef FLT_TRUE_MIN
 #    undef DBL_TRUE_MIN
 #    undef LDBL_TRUE_MIN
@@ -87,7 +92,9 @@
 #define DBL_MANT_DIG __DBL_MANT_DIG__
 #define LDBL_MANT_DIG __LDBL_MANT_DIG__
 
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) ||                \
+    __cplusplus >= 201103L ||                                                  \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define DECIMAL_DIG __DECIMAL_DIG__
 #endif
 
@@ -123,7 +130,9 @@
 #define DBL_MIN __DBL_MIN__
 #define LDBL_MIN __LDBL_MIN__
 
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) ||                \
+    __cplusplus >= 201703L ||                                                  \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define FLT_TRUE_MIN __FLT_DENORM_MIN__
 #  define DBL_TRUE_MIN __DBL_DENORM_MIN__
 #  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
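
Besides adding AIX to the include_next fallback, the reflowed float.h guards keep gating the C-standard additions: DECIMAL_DIG is C99, the *_TRUE_MIN macros are C11, and a hosted AIX build with _ALL_SOURCE now gets them as well. A minimal illustrative use:

    #include <float.h>
    #include <stdio.h>

    int main(void) {
    #if __STDC_VERSION__ >= 201112L
      /* Smallest positive subnormal double, a C11 addition. */
      printf("%a\n", DBL_TRUE_MIN);
    #endif
      return 0;
    }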
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fma4intrin.h b/darwin-x86/lib64/clang/14.0.6/include/fma4intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fma4intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/fma4intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fmaintrin.h b/darwin-x86/lib64/clang/14.0.6/include/fmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fmaintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/14.0.6/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h
rename to darwin-x86/lib64/clang/14.0.6/include/fuzzer/FuzzedDataProvider.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/fxsrintrin.h b/darwin-x86/lib64/clang/14.0.6/include/fxsrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/fxsrintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/fxsrintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/gfniintrin.h b/darwin-x86/lib64/clang/14.0.6/include/gfniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/gfniintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/gfniintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h b/darwin-x86/lib64/clang/14.0.6/include/hexagon_circ_brev_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h
rename to darwin-x86/lib64/clang/14.0.6/include/hexagon_circ_brev_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/darwin-x86/lib64/clang/14.0.6/include/hexagon_protos.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
rename to darwin-x86/lib64/clang/14.0.6/include/hexagon_protos.h
index cdffd93..2642f3c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/hexagon_protos.h
@@ -8003,17 +8003,6 @@
 #define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
 #if __HEXAGON_ARCH__ >= 65
 /* ==========================================================================
    Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h b/darwin-x86/lib64/clang/14.0.6/include/hexagon_types.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
rename to darwin-x86/lib64/clang/14.0.6/include/hexagon_types.h
index 6958809..029727c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/hexagon_types.h
@@ -1177,37 +1177,6 @@
 
 #endif /* __cplusplus */
 
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
 // V65 Vector types
 #if __HVX_ARCH__ >= 65
 #if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@
 #endif /* defined __HVX__ &&  (__HVX_LENGTH__ == 64) */
 #endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
 #endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
 
 /* Predicates */
 
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h b/darwin-x86/lib64/clang/14.0.6/include/hresetintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/hresetintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/hresetintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/htmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/htmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/htmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/htmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/htmxlintrin.h b/darwin-x86/lib64/clang/14.0.6/include/htmxlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/htmxlintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/darwin-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
similarity index 67%
rename from darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
rename to darwin-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
index 41ce7a6..7e3679a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
 //===----------------------------------------------------------------------===//
 
 
-
 #ifndef _HVX_HEXAGON_PROTOS_H_
 #define _HVX_HEXAGON_PROTOS_H_ 1
 
@@ -28,7 +27,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -39,7 +38,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
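
The hvx_hexagon_protos.h predicate hunks turn bare builtin aliases into function-like macros that spell out the conversions: a user-level HVX_VectorPred is carried in a full HVX vector, so each predicate operand is converted to a machine predicate with vandvrt(..., -1), the pred_* builtin runs on predicates, and the result is converted back with vandqrt(..., -1). A simplified sketch of the Q6_Q_and_QQ expansion (the _sketch name is hypothetical, and the __BUILTIN_VECTOR_WRAP layer from the real header is omitted):

    /* vector -> predicate: Qd = vand(Vu, Rt); predicate -> vector:
       Vd = vand(Qu, Rt).  Using -1 as Rt keeps every byte. */
    #define Q6_Q_and_QQ_sketch(Qs, Qt)                                \
      __builtin_HEXAGON_V6_vandqrt(                                   \
          __builtin_HEXAGON_V6_pred_and(                              \
              __builtin_HEXAGON_V6_vandvrt((Qs), -1),                 \
              __builtin_HEXAGON_V6_vandvrt((Qt), -1)),                \
          -1)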
@@ -83,7 +82,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -193,7 +192,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -952,7 +951,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1194,7 +1193,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
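The non-predicate hunks are mechanical: each object-like alias becomes a function-like macro with a named parameter list, so the bare macro name no longer expands on its own and the operand count is visible at the definition site. Call sites are unchanged; both spellings produce the same builtin call. A sketch under the same header assumptions as above:

/* Insert scalar word w into vector v; forwards straight to
 * __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(v, w). */
HVX_Vector insert_word(HVX_Vector v, int w)
{
    return Q6_Vw_vinsert_VwR(v, w);
}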
 
 #if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
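Wide (W-prefixed) operations follow the same rewrite but traffic in vector pairs rather than single vectors. A sketch, again with illustrative names only:

/* Multiply-accumulate unsigned bytes across a vector pair by a packed
 * scalar of signed bytes; HVX_VectorPair comes from <hexagon_types.h>. */
HVX_VectorPair mpa_ub_rb(HVX_VectorPair uu, int rb)
{
    return Q6_Wh_vmpa_WubRb(uu, rb);
}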
 
 #if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
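Q6_V_vmux_QVV is the consuming half of the predicate convention: the vector-typed predicate argument is narrowed with vandvrt(Qt,-1) before it reaches the mux builtin. Combined with a producing compare, this gives the usual select idiom (sketch; helper name is illustrative):

/* Per-lane unsigned-halfword max: the compare produces a predicate,
 * and the mux picks a where the predicate is set, b elsewhere. */
HVX_Vector max_uh(HVX_Vector a, HVX_Vector b)
{
    HVX_VectorPred gt = Q6_Q_vcmp_gt_VuhVuh(a, b);
    return Q6_V_vmux_QVV(gt, a, b);
}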
 
 #if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
    C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
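The conditional-subtract (condnac) macros consume a predicate the same way, via vandvrt(Qv,-1). A sketch of the positive form, which subtracts only in lanes where the predicate is set (the Qn variants act on the complement):

/* x.h -= u.h in lanes where q is true; other lanes pass through. */
HVX_Vector sub_where(HVX_VectorPred q, HVX_Vector x, HVX_Vector u)
{
    return Q6_Vh_condnac_QVhVh(q, x, u);
}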
 
 #if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
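Where a builtin takes its predicate operand as a plain vector, the rewritten macro bridges the HVX_VectorPred argument through __builtin_HEXAGON_V6_vandvrt(Q, -1) to materialize it in vector form first. A hedged sketch of calling the conditional-subtract form, assuming the comparison macro Q6_Q_vcmp_gt_VwVw defined elsewhere in this header:

    /* Per word lane: if (u > v) then x.w -= u.w */
    static HVX_Vector cond_sub(HVX_Vector x, HVX_Vector u, HVX_Vector v) {
      HVX_VectorPred q = Q6_Q_vcmp_gt_VwVw(u, v);
      return Q6_Vw_condnac_QVwVw(q, x, u);
    }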
 
 #if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
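Macros that produce a predicate convert in the opposite direction: the builtin's vector result is folded back into predicate form with __builtin_HEXAGON_V6_vandqrt(..., -1), so vandvrt/vandqrt round-trip HVX_VectorPred values at every macro boundary. A small sketch, assuming the scalar operand is a byte count:

    /* Predicate covering the first n bytes of a vector; unlike plain
       vsetq, vsetq2 can express n equal to the full vector length. */
    static HVX_VectorPred first_n_bytes(int n) {
      return Q6_Q_vsetq2_R(n);
    }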
 
 #if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
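The carry form forwards Qx untouched: the carry predicate is assumed here to be threaded by pointer as both input and output (HVX_VectorPred *Qx), so no vandvrt bridge is inserted. A hedged sketch of chaining a two-word addition, further assuming Q6_Q_vsetq_R(0) yields an all-false predicate:

    /* 64-bit lane add from 32-bit halves; carry propagates lo -> hi. */
    static void add64(HVX_Vector a_lo, HVX_Vector a_hi,
                      HVX_Vector b_lo, HVX_Vector b_hi,
                      HVX_Vector *lo, HVX_Vector *hi) {
      HVX_VectorPred carry = Q6_Q_vsetq_R(0);  /* assumed all-false */
      *lo = Q6_Vw_vadd_VwVwQ_carry(a_lo, b_lo, &carry);
      *hi = Q6_Vw_vadd_VwVwQ_carry(a_hi, b_hi, &carry);
    }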
 
 #if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
 #endif /* __HEXAGON_ARCH___ >= 65 */
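Nullary intrinsics also gain an (empty) parameter list, so the zero-pair constant is now written Q6_W_vzero() at the call site; the bare name no longer expands. For example:

    HVX_VectorPair acc = Q6_W_vzero();  /* Vdd32 = #0 in both halves */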
 
 #if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
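The gather macros forward address and modifier operands unchanged; only the masked AQ variants insert the vandvrt bridge on Qs. A heavily hedged sketch of a word gather, assuming the destination buffer lives in VTCM (an architectural requirement), the operand order shown above (destination, region base, region modifier, per-lane byte offsets), and that the modifier holds the region size minus one:

    /* dst[i] = *(int *)(base + offsets[i]) for each word lane. */
    static void gather_words(HVX_Vector *dst /* VTCM */, int base,
                             int region_size, HVX_Vector byte_offsets) {
      Q6_vgather_ARMVw(dst, base, region_size - 1, byte_offsets);
    }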
 
 #if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
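Scatter is symmetric: the unmasked forms pass operands straight through, and the Q-masked forms convert the predicate first. The same VTCM and size-minus-one assumptions apply in this hedged sketch:

    /* table[offsets[i]] = values[i] for word lanes where q is true. */
    static void scatter_words_masked(HVX_VectorPred q, int base,
                                     int region_size, HVX_Vector byte_offsets,
                                     HVX_Vector values) {
      Q6_vscatter_QRMVwV(q, base, region_size - 1, byte_offsets, values);
    }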
 
 #if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vabs(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vabs(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vfmv(Vu32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vu32.qf16
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
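The V68 additions pair each IEEE-format operation with a qf (wider-range accumulator format) variant, and the Q6_V*_equals_Vqf* conversions round a qf value back to IEEE once at the end. A hedged sketch of the intended idiom, assuming n >= 2:

    /* Sum n half-float vectors in qf16, rounding to hf once. */
    static HVX_Vector sum_hf(const HVX_Vector *v, int n) {
      HVX_Vector acc = Q6_Vqf16_vadd_VhfVhf(v[0], v[1]);  /* hf+hf   -> qf16 */
      for (int i = 2; i < n; ++i)
        acc = Q6_Vqf16_vadd_Vqf16Vhf(acc, v[i]);          /* qf16+hf -> qf16 */
      return Q6_Vhf_equals_Vqf16(acc);                    /* qf16    -> hf   */
    }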
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vuu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=Vu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfneg(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfneg(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
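Taken together, the V68 additions above give HVX a floating-point path: IEEE hf/sf arithmetic and comparisons, the wider qf16/qf32 accumulator formats, and the Q6_V*_equals_* conversions that normalize back to IEEE. A minimal sketch of the intended dataflow, assuming a Hexagon clang driven with -mhvx -mv68; the typedefs and wrapper function are illustrative, only the Q6_* names come from this header:

/* Conventional HVX vector types for 128-byte mode (assumed here; the
   platform headers normally provide them). */
typedef int HVX_Vector     __attribute__((__vector_size__(128)));
typedef int HVX_VectorPair __attribute__((__vector_size__(256)));

/* Per lane: max(a * b, c) over 32 floats.  Multiply in qf32 for the extra
   range, normalize to IEEE sf, then take the sf max. */
static HVX_Vector mul_then_max(HVX_Vector va, HVX_Vector vb, HVX_Vector vc) {
  HVX_Vector prod_qf = Q6_Vqf32_vmpy_VsfVsf(va, vb); /* sf x sf -> qf32 */
  HVX_Vector prod_sf = Q6_Vsf_equals_Vqf32(prod_qf); /* qf32 -> IEEE sf */
  return Q6_Vsf_vmax_VsfVsf(prod_sf, vc);            /* per-lane max    */
}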
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
 #endif /* __HVX__ */
 
 #endif
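The V69 block is smaller: per-lane arithmetic shift-right-with-saturate forms that narrow a vector pair, plus an unsigned-halfword multiply that keeps the upper 16 bits of each product (the :>>16 in its assembly syntax). A hedged sketch combining two of them, reusing the illustrative typedefs from the V68 sketch above:

/* Narrow a 32-bit accumulator pair to uh with saturation, then apply a
   Q16 fixed-point gain.  Shift amounts and gains are per-lane vectors. */
static HVX_Vector narrow_and_scale(HVX_VectorPair acc_w, HVX_Vector shamt_uh,
                                   HVX_Vector gain_q16) {
  HVX_Vector narrowed = Q6_Vuh_vasr_WwVuh_sat(acc_w, shamt_uh);
  return Q6_Vuh_vmpy_VuhVuh_rs16(narrowed, gain_q16);
}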
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ia32intrin.h b/darwin-x86/lib64/clang/14.0.6/include/ia32intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ia32intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ia32intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/immintrin.h b/darwin-x86/lib64/clang/14.0.6/include/immintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/immintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/immintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/intrin.h b/darwin-x86/lib64/clang/14.0.6/include/intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/inttypes.h b/darwin-x86/lib64/clang/14.0.6/include/inttypes.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/inttypes.h
rename to darwin-x86/lib64/clang/14.0.6/include/inttypes.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/invpcidintrin.h b/darwin-x86/lib64/clang/14.0.6/include/invpcidintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/invpcidintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/invpcidintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/iso646.h b/darwin-x86/lib64/clang/14.0.6/include/iso646.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/iso646.h
rename to darwin-x86/lib64/clang/14.0.6/include/iso646.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h b/darwin-x86/lib64/clang/14.0.6/include/keylockerintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/keylockerintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/keylockerintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/limits.h b/darwin-x86/lib64/clang/14.0.6/include/limits.h
similarity index 81%
rename from darwin-x86/lib64/clang/14.0.2/include/limits.h
rename to darwin-x86/lib64/clang/14.0.6/include/limits.h
index c653580..c2d3a7c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/limits.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/limits.h
@@ -62,6 +62,24 @@
 
 #define CHAR_BIT  __CHAR_BIT__
 
+/* C2x 5.2.4.2.1 */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define BOOL_WIDTH   __BOOL_WIDTH__
+#define CHAR_WIDTH   CHAR_BIT
+#define SCHAR_WIDTH  CHAR_BIT
+#define UCHAR_WIDTH  CHAR_BIT
+#define USHRT_WIDTH  __SHRT_WIDTH__
+#define SHRT_WIDTH   __SHRT_WIDTH__
+#define UINT_WIDTH   __INT_WIDTH__
+#define INT_WIDTH    __INT_WIDTH__
+#define ULONG_WIDTH  __LONG_WIDTH__
+#define LONG_WIDTH   __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH  __LLONG_WIDTH__
+#endif
+
 #ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
 #define CHAR_MIN 0
 #define CHAR_MAX UCHAR_MAX
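The *_WIDTH block added above is the C2x 5.2.4.2.1 set of bit-width macros, gated on clang's placeholder __STDC_VERSION__ value as the FIXME notes. A quick sketch of what they replace, assuming a host compiler invoked with -std=c2x:

#include <limits.h>
#include <stdio.h>

int main(void) {
#if defined(INT_WIDTH) && defined(ULLONG_WIDTH)
  /* the macros supersede the old sizeof(int) * CHAR_BIT idiom */
  printf("int: %d bits, unsigned long long: %d bits\n",
         INT_WIDTH, ULLONG_WIDTH);
#else
  puts("*_WIDTH macros require C2x mode");
#endif
  return 0;
}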
diff --git a/darwin-x86/lib64/clang/14.0.2/include/lwpintrin.h b/darwin-x86/lib64/clang/14.0.6/include/lwpintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/lwpintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/lwpintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/lzcntintrin.h b/darwin-x86/lib64/clang/14.0.6/include/lzcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/lzcntintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/lzcntintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mm3dnow.h b/darwin-x86/lib64/clang/14.0.6/include/mm3dnow.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mm3dnow.h
rename to darwin-x86/lib64/clang/14.0.6/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h b/darwin-x86/lib64/clang/14.0.6/include/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mm_malloc.h
rename to darwin-x86/lib64/clang/14.0.6/include/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/mmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/module.modulemap b/darwin-x86/lib64/clang/14.0.6/include/module.modulemap
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/module.modulemap
rename to darwin-x86/lib64/clang/14.0.6/include/module.modulemap
diff --git a/darwin-x86/lib64/clang/14.0.2/include/movdirintrin.h b/darwin-x86/lib64/clang/14.0.6/include/movdirintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/movdirintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/movdirintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/msa.h b/darwin-x86/lib64/clang/14.0.6/include/msa.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/msa.h
rename to darwin-x86/lib64/clang/14.0.6/include/msa.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/mwaitxintrin.h b/darwin-x86/lib64/clang/14.0.6/include/mwaitxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/mwaitxintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/mwaitxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/nmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/nmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/nmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/darwin-x86/lib64/clang/14.0.6/include/opencl-c-base.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
rename to darwin-x86/lib64/clang/14.0.6/include/opencl-c-base.h
index 9c81ddb..ad276dc 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/opencl-c-base.h
@@ -68,9 +68,16 @@
 // For the SPIR and SPIR-V target all features are supported.
 #if defined(__SPIR__) || defined(__SPIRV__)
 #define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
 #endif // defined(__SPIR__)
 #endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 
+#if !defined(__opencl_c_generic_address_space)
+// Internal feature macro to provide named (global, local, private) address
+// space overloads for builtin functions that take a pointer argument.
+#define __opencl_c_named_address_space_builtins 1
+#endif // !defined(__opencl_c_generic_address_space)
+
 // built-in scalar data types:
 
 /**
@@ -498,12 +505,14 @@
 
 #define MAX_WORK_DIM 3
 
+#ifdef __opencl_c_device_enqueue
 typedef struct {
   unsigned int workDimension;
   size_t globalWorkOffset[MAX_WORK_DIM];
   size_t globalWorkSize[MAX_WORK_DIM];
   size_t localWorkSize[MAX_WORK_DIM];
 } ndrange_t;
+#endif // __opencl_c_device_enqueue
 
 #endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
@@ -600,9 +609,11 @@
 // C++ for OpenCL - __remove_address_space
 #if defined(__OPENCL_CPP_VERSION__)
 template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
 template <typename _Tp> struct __remove_address_space<__generic _Tp> {
   using type = _Tp;
 };
+#endif
 template <typename _Tp> struct __remove_address_space<__global _Tp> {
   using type = _Tp;
 };
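Two behavioral changes land in opencl-c-base.h: ndrange_t and the __generic __remove_address_space specialization are now gated on their corresponding feature macros, and a new internal __opencl_c_named_address_space_builtins macro is defined whenever the generic address space is unavailable, steering pointer-taking builtins to explicit per-address-space overloads. A hedged OpenCL C sketch of the two shapes a call site can take (the kernel itself is illustrative):

__kernel void split(__global float *x, __global float *ipart) {
  size_t i = get_global_id(0);
#if defined(__opencl_c_generic_address_space)
  float ip;                        /* generic overload: plain float* */
  x[i] = fract(x[i], &ip);
  ipart[i] = ip;
#else  /* __opencl_c_named_address_space_builtins path */
  x[i] = fract(x[i], &ipart[i]);   /* explicit __global float* overload */
#endif
}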
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h b/darwin-x86/lib64/clang/14.0.6/include/opencl-c.h
similarity index 96%
rename from darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
rename to darwin-x86/lib64/clang/14.0.6/include/opencl-c.h
index 32af848..059a2ec 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/opencl-c.h
@@ -11,11 +11,11 @@
 
 #include "opencl-c-base.h"
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_images)
 #ifndef cl_khr_depth_images
 #define cl_khr_depth_images
 #endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_images)
 
 #if __OPENCL_C_VERSION__ < CL_VERSION_2_0
 #ifdef cl_khr_3d_image_writes
@@ -7285,7 +7285,9 @@
 half8 __ovld fract(half8 x, half8 *iptr);
 half16 __ovld fract(half16 x, half16 *iptr);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld fract(float x, __global float *iptr);
 float2 __ovld fract(float2 x, __global float2 *iptr);
 float3 __ovld fract(float3 x, __global float3 *iptr);
@@ -7344,7 +7346,7 @@
 half8 __ovld fract(half8 x, __private half8 *iptr);
 half16 __ovld fract(half16 x, __private half16 *iptr);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Extract mantissa and exponent from x. For each
@@ -7375,7 +7377,9 @@
 half8 __ovld frexp(half8 x, int8 *exp);
 half16 __ovld frexp(half16 x, int16 *exp);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld frexp(float x, __global int *exp);
 float2 __ovld frexp(float2 x, __global int2 *exp);
 float3 __ovld frexp(float3 x, __global int3 *exp);
@@ -7434,7 +7438,7 @@
 half8 __ovld frexp(half8 x, __private int8 *exp);
 half16 __ovld frexp(half16 x, __private int16 *exp);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute the value of the square root of x^2 + y^2
@@ -7582,7 +7586,9 @@
 half8 __ovld lgamma_r(half8 x, int8 *signp);
 half16 __ovld lgamma_r(half16 x, int16 *signp);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld lgamma_r(float x, __global int *signp);
 float2 __ovld lgamma_r(float2 x, __global int2 *signp);
 float3 __ovld lgamma_r(float3 x, __global int3 *signp);
@@ -7641,7 +7647,7 @@
 half8 __ovld lgamma_r(half8 x, __private int8 *signp);
 half16 __ovld lgamma_r(half16 x, __private int16 *signp);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute natural logarithm.
@@ -7888,7 +7894,9 @@
 half8 __ovld modf(half8 x, half8 *iptr);
 half16 __ovld modf(half16 x, half16 *iptr);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld modf(float x, __global float *iptr);
 float2 __ovld modf(float2 x, __global float2 *iptr);
 float3 __ovld modf(float3 x, __global float3 *iptr);
@@ -7947,7 +7955,7 @@
 half8 __ovld modf(half8 x, __private half8 *iptr);
 half16 __ovld modf(half16 x, __private half16 *iptr);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Returns a quiet NaN. The nancode may be placed
@@ -8147,9 +8155,10 @@
 half4 __ovld remquo(half4 x, half4 y, int4 *quo);
 half8 __ovld remquo(half8 x, half8 y, int8 *quo);
 half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld remquo(float x, float y, __global int *quo);
 float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
 float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
@@ -8208,7 +8217,7 @@
 half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
 half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 /**
  * Round to integral value (using round to nearest
  * even rounding mode) in floating-point format.
@@ -8372,7 +8381,9 @@
 half8 __ovld sincos(half8 x, half8 *cosval);
 half16 __ovld sincos(half16 x, half16 *cosval);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld sincos(float x, __global float *cosval);
 float2 __ovld sincos(float2 x, __global float2 *cosval);
 float3 __ovld sincos(float3 x, __global float3 *cosval);
@@ -8431,7 +8442,7 @@
 half8 __ovld sincos(half8 x, __private half8 *cosval);
 half16 __ovld sincos(half16 x, __private half16 *cosval);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute hyperbolic sine.
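The large hunk that follows is mechanical but meaningful: every vload overload gains __purefn, clang's pure-function attribute, which tells the optimizer the call reads memory but has no other side effects, so identical calls can be merged. A sketch of the payoff (illustrative kernel; nothing writes through p between the two calls):

__kernel void twice(__constant float *p, __global float4 *out) {
  size_t i = get_global_id(0);
  float4 a = vload4(i, p);
  float4 b = vload4(i, p);  /* duplicate call becomes a CSE candidate under
                               __purefn; without it both calls must be kept */
  out[i] = a + b;
}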
@@ -11190,308 +11201,310 @@
  * 64-bit aligned if gentype is long, ulong, double.
  */
 
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
+char2 __ovld __purefn vload2(size_t offset, const __constant char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __constant uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __constant short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __constant ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __constant int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __constant uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __constant long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __constant ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __constant float *p);
+char3 __ovld __purefn vload3(size_t offset, const __constant char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __constant uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __constant short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __constant ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __constant int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __constant uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __constant long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __constant ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __constant float *p);
+char4 __ovld __purefn vload4(size_t offset, const __constant char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __constant uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __constant short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __constant ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __constant int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __constant uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __constant long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __constant ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __constant float *p);
+char8 __ovld __purefn vload8(size_t offset, const __constant char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __constant uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __constant short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __constant ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __constant int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __constant uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __constant long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __constant ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __constant float *p);
+char16 __ovld __purefn vload16(size_t offset, const __constant char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __constant uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __constant short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __constant ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __constant int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __constant uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __constant long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __constant ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __constant float *p);
 #ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
+double2 __ovld __purefn vload2(size_t offset, const __constant double *p);
+double3 __ovld __purefn vload3(size_t offset, const __constant double *p);
+double4 __ovld __purefn vload4(size_t offset, const __constant double *p);
+double8 __ovld __purefn vload8(size_t offset, const __constant double *p);
+double16 __ovld __purefn vload16(size_t offset, const __constant double *p);
 #endif //cl_khr_fp64
 
 #ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
+half __ovld __purefn vload(size_t offset, const __constant half *p);
+half2 __ovld __purefn vload2(size_t offset, const __constant half *p);
+half3 __ovld __purefn vload3(size_t offset, const __constant half *p);
+half4 __ovld __purefn vload4(size_t offset, const __constant half *p);
+half8 __ovld __purefn vload8(size_t offset, const __constant half *p);
+half16 __ovld __purefn vload16(size_t offset, const __constant half *p);
 #endif //cl_khr_fp16
 
 #if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
+char2 __ovld __purefn vload2(size_t offset, const char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const int *p);
+uint2 __ovld __purefn vload2(size_t offset, const uint *p);
+long2 __ovld __purefn vload2(size_t offset, const long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const float *p);
+char3 __ovld __purefn vload3(size_t offset, const char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const int *p);
+uint3 __ovld __purefn vload3(size_t offset, const uint *p);
+long3 __ovld __purefn vload3(size_t offset, const long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const float *p);
+char4 __ovld __purefn vload4(size_t offset, const char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const int *p);
+uint4 __ovld __purefn vload4(size_t offset, const uint *p);
+long4 __ovld __purefn vload4(size_t offset, const long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const float *p);
+char8 __ovld __purefn vload8(size_t offset, const char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const int *p);
+uint8 __ovld __purefn vload8(size_t offset, const uint *p);
+long8 __ovld __purefn vload8(size_t offset, const long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const float *p);
+char16 __ovld __purefn vload16(size_t offset, const char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const int *p);
+uint16 __ovld __purefn vload16(size_t offset, const uint *p);
+long16 __ovld __purefn vload16(size_t offset, const long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const float *p);
 
 #ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
+double2 __ovld __purefn vload2(size_t offset, const double *p);
+double3 __ovld __purefn vload3(size_t offset, const double *p);
+double4 __ovld __purefn vload4(size_t offset, const double *p);
+double8 __ovld __purefn vload8(size_t offset, const double *p);
+double16 __ovld __purefn vload16(size_t offset, const double *p);
 #endif //cl_khr_fp64
 
 #ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
+half __ovld __purefn vload(size_t offset, const half *p);
+half2 __ovld __purefn vload2(size_t offset, const half *p);
+half3 __ovld __purefn vload3(size_t offset, const half *p);
+half4 __ovld __purefn vload4(size_t offset, const half *p);
+half8 __ovld __purefn vload8(size_t offset, const half *p);
+half16 __ovld __purefn vload16(size_t offset, const half *p);
 #endif //cl_khr_fp16
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+char2 __ovld __purefn vload2(size_t offset, const __global char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __global uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __global short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __global ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __global int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __global uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __global long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __global ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __global float *p);
+char3 __ovld __purefn vload3(size_t offset, const __global char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __global uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __global short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __global ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __global int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __global uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __global long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __global ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __global float *p);
+char4 __ovld __purefn vload4(size_t offset, const __global char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __global uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __global short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __global ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __global int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __global uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __global long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __global ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __global float *p);
+char8 __ovld __purefn vload8(size_t offset, const __global char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __global uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __global short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __global ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __global int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __global uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __global long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __global ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __global float *p);
+char16 __ovld __purefn vload16(size_t offset, const __global char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __global uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __global short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __global ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __global int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __global uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __global long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __global ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __global float *p);
+char2 __ovld __purefn vload2(size_t offset, const __local char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __local uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __local short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __local ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __local int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __local uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __local long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __local ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __local float *p);
+char3 __ovld __purefn vload3(size_t offset, const __local char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __local uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __local short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __local ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __local int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __local uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __local long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __local ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __local float *p);
+char4 __ovld __purefn vload4(size_t offset, const __local char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __local uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __local short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __local ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __local int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __local uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __local long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __local ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __local float *p);
+char8 __ovld __purefn vload8(size_t offset, const __local char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __local uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __local short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __local ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __local int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __local uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __local long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __local ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __local float *p);
+char16 __ovld __purefn vload16(size_t offset, const __local char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __local uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __local short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __local ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __local int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __local uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __local long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __local ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __local float *p);
+char2 __ovld __purefn vload2(size_t offset, const __private char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __private uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __private short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __private ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __private int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __private uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __private long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __private ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __private float *p);
+char3 __ovld __purefn vload3(size_t offset, const __private char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __private uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __private short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __private ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __private int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __private uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __private long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __private ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __private float *p);
+char4 __ovld __purefn vload4(size_t offset, const __private char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __private uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __private short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __private ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __private int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __private uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __private long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __private ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __private float *p);
+char8 __ovld __purefn vload8(size_t offset, const __private char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __private uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __private short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __private ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __private int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __private uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __private long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __private ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __private float *p);
+char16 __ovld __purefn vload16(size_t offset, const __private char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __private uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __private short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __private ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __private int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __private uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __private long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __private ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __private float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t offset, const __global double *p);
+double3 __ovld __purefn vload3(size_t offset, const __global double *p);
+double4 __ovld __purefn vload4(size_t offset, const __global double *p);
+double8 __ovld __purefn vload8(size_t offset, const __global double *p);
+double16 __ovld __purefn vload16(size_t offset, const __global double *p);
+double2 __ovld __purefn vload2(size_t offset, const __local double *p);
+double3 __ovld __purefn vload3(size_t offset, const __local double *p);
+double4 __ovld __purefn vload4(size_t offset, const __local double *p);
+double8 __ovld __purefn vload8(size_t offset, const __local double *p);
+double16 __ovld __purefn vload16(size_t offset, const __local double *p);
+double2 __ovld __purefn vload2(size_t offset, const __private double *p);
+double3 __ovld __purefn vload3(size_t offset, const __private double *p);
+double4 __ovld __purefn vload4(size_t offset, const __private double *p);
+double8 __ovld __purefn vload8(size_t offset, const __private double *p);
+double16 __ovld __purefn vload16(size_t offset, const __private double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld __purefn vload(size_t offset, const __global half *p);
+half2 __ovld __purefn vload2(size_t offset, const __global half *p);
+half3 __ovld __purefn vload3(size_t offset, const __global half *p);
+half4 __ovld __purefn vload4(size_t offset, const __global half *p);
+half8 __ovld __purefn vload8(size_t offset, const __global half *p);
+half16 __ovld __purefn vload16(size_t offset, const __global half *p);
+half __ovld __purefn vload(size_t offset, const __local half *p);
+half2 __ovld __purefn vload2(size_t offset, const __local half *p);
+half3 __ovld __purefn vload3(size_t offset, const __local half *p);
+half4 __ovld __purefn vload4(size_t offset, const __local half *p);
+half8 __ovld __purefn vload8(size_t offset, const __local half *p);
+half16 __ovld __purefn vload16(size_t offset, const __local half *p);
+half __ovld __purefn vload(size_t offset, const __private half *p);
+half2 __ovld __purefn vload2(size_t offset, const __private half *p);
+half3 __ovld __purefn vload3(size_t offset, const __private half *p);
+half4 __ovld __purefn vload4(size_t offset, const __private half *p);
+half8 __ovld __purefn vload8(size_t offset, const __private half *p);
+half16 __ovld __purefn vload16(size_t offset, const __private half *p);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
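// Illustrative sketch, not from the patched header: under the new
// __opencl_c_named_address_space_builtins guard above, the explicitly
// qualified vloadn overloads remain usable even when the generic address
// space feature is absent. Kernel and parameter names are hypothetical.
__kernel void scale4(__global const float *in, __global float *out, float s) {
    size_t gid = get_global_id(0);
    float4 v = vload4(gid, in); // picks the const __global float* overload
    vstore4(v * s, gid, out);   // matching vstore4 overload, declared below
}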
+
 #if defined(__opencl_c_generic_address_space)
 void __ovld vstore2(char2 data, size_t offset, char *p);
 void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
@@ -11553,7 +11566,9 @@
 void __ovld vstore8(half8 data, size_t offset, half *p);
 void __ovld vstore16(half16 data, size_t offset, half *p);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore2(char2 data, size_t offset, __global char *p);
 void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
 void __ovld vstore2(short2 data, size_t offset, __global short *p);
@@ -11726,7 +11741,7 @@
 void __ovld vstore8(half8 data, size_t offset, __private half *p);
 void __ovld vstore16(half16 data, size_t offset, __private half *p);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Read sizeof (half) bytes of data from address
@@ -11736,15 +11751,17 @@
  * The read address computed as (p + offset)
  * must be 16-bit aligned.
  */
-float __ovld vload_half(size_t offset, const __constant half *p);
+float __ovld __purefn vload_half(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
+float __ovld __purefn vload_half(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld __purefn vload_half(size_t offset, const __global half *p);
+float __ovld __purefn vload_half(size_t offset, const __local half *p);
+float __ovld __purefn vload_half(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
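// Illustrative sketch, not from the patched header: vload_half widens the
// stored half bits to float on load and vstore_half rounds back, so a
// kernel can use fp16 storage without cl_khr_fp16 arithmetic. Names are
// hypothetical.
__kernel void gain(__global const half *in, __global half *out, float g) {
    size_t i = get_global_id(0);
    float x = vload_half(i, in); // half -> float conversion on load
    vstore_half(x * g, i, out);  // rounds to half with the default mode
}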
+
 /**
  * Read sizeof (halfn) bytes of data from address
  * (p + (offset * n)). The data read is interpreted
@@ -11753,35 +11770,37 @@
  * value is returned. The read address computed
  * as (p + (offset * n)) must be 16-bit aligned.
  */
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vload_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
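// Illustrative sketch, not from the patched header: the vector forms widen
// n halfs per call, and an explicit _rte suffix selects round-to-nearest-even
// on the store. Kernel name is hypothetical.
__kernel void axpy_h(__global const half *x, __global half *y, float a) {
    size_t i = get_global_id(0);
    float4 r = a * vload_half4(i, x) + vload_half4(i, y);
    vstore_half4_rte(r, i, y); // explicit rounding-mode variant
}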
+
 /**
  * The float value given by data is first
  * converted to a half value using the appropriate
@@ -11806,7 +11825,9 @@
 void __ovld vstore_half_rtp(double data, size_t offset, half *p);
 void __ovld vstore_half_rtn(double data, size_t offset, half *p);
 #endif //cl_khr_fp64
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore_half(float data, size_t offset, __global half *p);
 void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
 void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
@@ -11839,7 +11860,7 @@
 void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
 void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * The floatn value given by data is converted to
@@ -11905,7 +11926,9 @@
 void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
 void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
 #endif //cl_khr_fp64
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
 void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
 void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
@@ -12058,7 +12081,7 @@
 void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
 void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@@ -12073,35 +12096,37 @@
  * The address computed as (p + (offset * 4))
  * must be aligned to sizeof (half) * 4 bytes.
  */
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vloada_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
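// Illustrative sketch, not from the patched header: vloada_half4 computes
// the address as p + (offset * 4) and assumes half4-aligned storage, unlike
// vload_half4, which only needs 16-bit alignment. Names are hypothetical.
__kernel void widen(__global const half *packed, __global float4 *out) {
    size_t i = get_global_id(0);
    out[i] = vloada_half4(i, packed); // element i of an aligned half4 array
}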
+
 /**
  * The floatn value given by data is converted to
  * a halfn value using the appropriate rounding
@@ -12180,8 +12205,9 @@
 void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
 void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
 #endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
 
-#else
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
 void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
 void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
@@ -12363,7 +12389,7 @@
 void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
 void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 // OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
 
@@ -15585,7 +15611,7 @@
 #endif //cl_khr_fp16
 
 // Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
 int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
 uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
@@ -15628,7 +15654,6 @@
 float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 #ifdef cl_khr_mipmap_image
 float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
 int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
@@ -15679,7 +15704,6 @@
 uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
 
 #endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
 // Image read functions returning half4 type
 #ifdef cl_khr_fp16
@@ -15690,7 +15714,7 @@
 half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
 half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
 #endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
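// Illustrative sketch, not from the patched header: the read_write image
// builtins are now keyed to the OpenCL C 3.0 optional feature rather than
// the language version, so portable kernels should guard on the same macro.
// Kernel name is hypothetical.
#ifdef __opencl_c_read_write_images
__kernel void invert(read_write image2d_t img) {
    int2 c = (int2)(get_global_id(0), get_global_id(1));
    float4 p = read_imagef(img, c);           // sampler-less read
    write_imagef(img, c, (float4)(1.0f) - p); // write back in place
}
#endif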
 
 /**
  * Write color value to location specified by coordinate
@@ -15834,7 +15858,7 @@
 #endif //cl_khr_fp16
 
 // Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
 void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
 void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
@@ -15866,7 +15890,6 @@
 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
 #endif //cl_khr_depth_images
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 #if defined(cl_khr_mipmap_image_writes)
 void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
 void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
@@ -15894,7 +15917,6 @@
 #endif //cl_khr_3d_image_writes
 
 #endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
 // Image write functions for half4 type
 #ifdef cl_khr_fp16
@@ -15907,7 +15929,7 @@
 void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
 void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
 #endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 // Note: In OpenCL v1.0/1.1/1.2, the image argument of image query builtin functions does not have
 // an access qualifier, which is assumed to be read_only by default. Image query builtin
@@ -15955,7 +15977,7 @@
 int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_width(read_write image1d_t image);
 int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_width(read_write image2d_t image);
@@ -15972,7 +15994,7 @@
 int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image height in pixels.
@@ -16007,7 +16029,7 @@
 int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_height(read_write image2d_t image);
 int __ovld __cnfn get_image_height(read_write image3d_t image);
 int __ovld __cnfn get_image_height(read_write image2d_array_t image);
@@ -16021,7 +16043,7 @@
 int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image depth in pixels.
@@ -16032,9 +16054,9 @@
 int __ovld __cnfn get_image_depth(write_only image3d_t image);
 #endif
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 // OpenCL Extension v2.0 s9.18 - Mipmaps
 #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16053,9 +16075,11 @@
 int __ovld get_image_num_mip_levels(write_only image3d_t image);
 #endif
 
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_mip_levels(read_write image1d_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_t image);
 int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#endif //defined(__opencl_c_read_write_images)
 
 int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
 int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
@@ -16067,10 +16091,12 @@
 int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
 int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
 
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#endif //defined(__opencl_c_read_write_images)
 
 #endif //cl_khr_mipmap_image
 #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16130,7 +16156,7 @@
 int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
@@ -16147,7 +16173,7 @@
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image channel order. Valid values are:
@@ -16202,7 +16228,7 @@
 int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
 int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
@@ -16219,7 +16245,7 @@
 int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the 2D image width and height as an int2
@@ -16252,7 +16278,7 @@
 int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
 #ifdef cl_khr_depth_images
@@ -16265,7 +16291,7 @@
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the 3D image width, height, and depth as an
@@ -16277,9 +16303,9 @@
 #ifdef cl_khr_3d_image_writes
 int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
 #endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image array size.
@@ -16305,7 +16331,7 @@
 size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
 #ifdef cl_khr_depth_images
@@ -16315,7 +16341,7 @@
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the number of samples associated with image
@@ -16331,12 +16357,12 @@
 int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_samples(read_write image2d_msaa_t image);
 int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
 int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 #endif
 
 // OpenCL v2.0 s6.13.15 - Work-group Functions
@@ -16450,6 +16476,7 @@
 // OpenCL v2.0 s6.13.17 - Enqueue Kernels
 #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
+#ifdef __opencl_c_device_enqueue
 ndrange_t __ovld ndrange_1D(size_t);
 ndrange_t __ovld ndrange_1D(size_t, size_t);
 ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
@@ -16477,6 +16504,7 @@
 void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
 
 queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
 #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
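// Illustrative sketch, not from the patched header: with the added guard,
// device-side enqueue now requires __opencl_c_device_enqueue in addition to
// OpenCL C >= 2.0. Kernel name is hypothetical.
#ifdef __opencl_c_device_enqueue
__kernel void parent(__global int *buf) {
    queue_t q = get_default_queue();
    ndrange_t r = ndrange_1D(64); // 64 work-items, one dimension
    enqueue_kernel(q, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, r,
                   ^{ buf[get_global_id(0)] += 1; });
}
#endif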
 
 // OpenCL Extension v2.0 s9.17 - Sub-groups
@@ -17572,34 +17600,38 @@
 long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
 ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
 
+#if defined(__opencl_c_images)
 uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
 uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
 uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
 uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+#endif
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
 uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
 uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
 uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
 uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
 uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
 uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
 
+#if defined(__opencl_c_images)
 void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
 void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
 void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
 void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
 void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
 void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
 void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
 void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
@@ -17712,68 +17744,76 @@
 short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
 ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
 
+#if defined(__opencl_c_images)
 uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
 
+#if defined(__opencl_c_images)
 void       __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+#endif //defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
 
+#if defined(__opencl_c_images)
 ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
 ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
 ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
 ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
 ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
 ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
 ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
 ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
 ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
 ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
 
+#if defined(__opencl_c_images)
 void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
 void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
 void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
 void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
 void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
 void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
 void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
 void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
@@ -17891,6 +17931,7 @@
     short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
     ushort2 image_size);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_ime_result_t __ovld
 intel_sub_group_avc_ime_evaluate_with_single_reference(
     read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -17931,6 +17972,7 @@
     read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
     intel_sub_group_avc_ime_payload_t payload,
     intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif // defined(__opencl_c_images)
 
 intel_sub_group_avc_ime_single_reference_streamin_t __ovld
 intel_sub_group_avc_ime_get_single_reference_streamin(
@@ -17995,6 +18037,7 @@
 intel_sub_group_avc_ref_set_bilinear_filter_enable(
     intel_sub_group_avc_ref_payload_t payload);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_ref_result_t __ovld
 intel_sub_group_avc_ref_evaluate_with_single_reference(
     read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -18013,6 +18056,7 @@
     read_only image2d_t src_image, uint packed_reference_ids,
     uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
     intel_sub_group_avc_ref_payload_t payload);
+#endif // defined(__opencl_c_images)
 
 // SIC built-in functions
 intel_sub_group_avc_sic_payload_t __ovld
@@ -18063,6 +18107,7 @@
     uchar block_based_skip_type,
     intel_sub_group_avc_sic_payload_t payload);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_sic_result_t __ovld
 intel_sub_group_avc_sic_evaluate_ipe(
     read_only image2d_t src_image, sampler_t vme_media_sampler,
@@ -18085,6 +18130,7 @@
     read_only image2d_t src_image, uint packed_reference_ids,
     uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
     intel_sub_group_avc_sic_payload_t payload);
+#endif // defined(__opencl_c_images)
 
 uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
     intel_sub_group_avc_sic_result_t result);
@@ -18493,6 +18539,8 @@
 // Disable any extensions we may have enabled previously.
 #pragma OPENCL EXTENSION all : disable
 
+#undef __opencl_c_named_address_space_builtins
+
 #undef __cnfn
 #undef __ovld
 #endif //_OPENCL_H_
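
Note: the opencl-c.h hunks above replace the old "defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)" guards with the OpenCL C 3.0 feature-test macros __opencl_c_images and __opencl_c_read_write_images, so the Intel sub-group image built-ins are declared exactly when the implementation reports the corresponding optional feature. User kernels can guard their own image paths the same way; a minimal sketch (the kernel name and logic are illustrative only, and a CL_UNSIGNED_INT* channel image is assumed for the *ui accessors):

#if defined(__opencl_c_read_write_images)
// Single read_write image: read a pixel and write it back one row down.
kernel void shift_rows(read_write image2d_t img) {
  int x = get_global_id(0), y = get_global_id(1);
  uint4 px = read_imageui(img, (int2)(x, y));
  write_imageui(img, (int2)(x, y + 1), px);
}
#else
// Fallback when the read_write images feature is absent: separate
// read_only source and write_only destination images.
kernel void shift_rows(read_only image2d_t src, write_only image2d_t dst) {
  int x = get_global_id(0), y = get_global_id(1);
  write_imageui(dst, (int2)(x, y + 1), read_imageui(src, (int2)(x, y)));
}
#endif
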
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/cmath
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/cmath
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex_cmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/math.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/new b/darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/openmp_wrappers/new
rename to darwin-x86/lib64/clang/14.0.6/include/openmp_wrappers/new
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pconfigintrin.h b/darwin-x86/lib64/clang/14.0.6/include/pconfigintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/pconfigintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/pconfigintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pkuintrin.h b/darwin-x86/lib64/clang/14.0.6/include/pkuintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/pkuintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/pmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/popcntintrin.h b/darwin-x86/lib64/clang/14.0.6/include/popcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/popcntintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/popcntintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/emmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/mmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/smmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ppc_wrappers/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/prfchwintrin.h b/darwin-x86/lib64/clang/14.0.6/include/prfchwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/prfchwintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/darwin-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
rename to darwin-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
index 008b8dd..62054a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ b/darwin-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
@@ -128,8 +128,10 @@
 INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
 INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
 INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
 INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
 INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
 INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
@@ -644,6 +646,7 @@
        (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
         (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
 
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 8
 /* Indexed profile format version (start from 1). */
@@ -653,15 +656,21 @@
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
  * generated profile, and 0 if this is a Clang FE generated profile.
  * 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
+ * The 60th bit indicates single byte coverage instrumentation.
+ * The 61st bit indicates function entry instrumentation only.
  */
 #define VARIANT_MASKS_ALL 0xff00000000000000ULL
 #define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
 #define VARIANT_MASK_IR_PROF (0x1ULL << 56)
 #define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
 #define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
+#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
+#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
 #define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
 #define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
 #define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
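
Note: the new VARIANT_MASK_* definitions extend the variant byte documented in the comment above (bits 59-61 for debug-info correlation, single-byte coverage, and function-entry-only instrumentation). A minimal sketch of decoding a raw profile's Version word with these masks (the function is illustrative; the mask values are copied from the hunk above):

#include <stdint.h>
#include <stdio.h>

/* Decode the Version header word of a raw profile. */
static void describe_profile_version(uint64_t v) {
  printf("raw format version: %llu\n",
         (unsigned long long)(v & ~0xff00000000000000ULL)); /* GET_VERSION */
  if (v & (0x1ULL << 56)) puts("IR-level instrumentation");
  if (v & (0x1ULL << 57)) puts("context-sensitive records");
  if (v & (0x1ULL << 58)) puts("function-entry instrumentation");
  if (v & (0x1ULL << 59)) puts("debug-info correlated");        /* new */
  if (v & (0x1ULL << 60)) puts("single-byte coverage");         /* new */
  if (v & (0x1ULL << 61)) puts("function-entry counters only"); /* new */
}
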
diff --git a/darwin-x86/lib64/clang/14.0.2/include/ptwriteintrin.h b/darwin-x86/lib64/clang/14.0.6/include/ptwriteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/ptwriteintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/ptwriteintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h b/darwin-x86/lib64/clang/14.0.6/include/rdseedintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/rdseedintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/rdseedintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/rtmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/rtmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/rtmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/rtmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/s390intrin.h b/darwin-x86/lib64/clang/14.0.6/include/s390intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/s390intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/s390intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/allocator_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/asan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/asan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/common_interface_defs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/coverage_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
similarity index 79%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
index d6209a3..8e581a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
@@ -27,6 +27,10 @@
 /// Signature of the callback argument to dfsan_set_write_callback().
 typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
 
+/// Signature of the callback argument to dfsan_set_conditional_callback().
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+                                             dfsan_origin origin);
+
 /// Computes the union of \c l1 and \c l2, resulting in a union label.
 dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
 
@@ -54,6 +58,10 @@
 /// Retrieves the label associated with the data at the given address.
 dfsan_label dfsan_read_label(const void *addr, size_t size);
 
+/// Return the origin associated with the first taint byte in the size bytes
+/// from the address addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+
 /// Returns whether the given label label contains the label elem.
 int dfsan_has_label(dfsan_label label, dfsan_label elem);
 
@@ -70,6 +78,19 @@
 /// callback executes.  Pass in NULL to remove any callback.
 void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
 
+/// Sets a callback to be invoked on any conditional expressions which have a
+/// taint label set. This can be used to find where tainted data influences
+/// the behavior of the program.
+/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+
+/// Conditional expressions can also occur inside signal handlers.
+/// Making callbacks signal-safe is tricky, so when
+/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
+/// handlers instead bitwise-or the labels they see into a global value.
+/// This function returns all label bits seen in signal handler conditions.
+dfsan_label dfsan_get_labels_in_signal_conditional();
+
 /// Interceptor hooks.
 /// Whenever one of dfsan's custom functions is called, the corresponding
 /// hook is called if it is non-zero. The hooks should be defined by the user.
@@ -87,6 +108,9 @@
 /// prints description at the beginning of the trace. If origin tracking is not
 /// on, or the address is not labeled, it prints nothing.
 void dfsan_print_origin_trace(const void *addr, const char *description);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+void dfsan_print_origin_id_trace(dfsan_origin origin);
 
 /// Prints the origin trace of the label at the address \p addr to a
 /// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -124,6 +148,10 @@
 /// return value is not less than \p out_buf_size.
 size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
                                  char *out_buf, size_t out_buf_size);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
+                                    size_t out_buf_size);
 
 /// Prints the stack trace leading to this call to a pre-allocated output
 /// buffer.
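
Note: the additions above introduce dfsan_set_conditional_callback() plus origin-id variants of the trace printers. A minimal sketch of wiring them together, assuming a program built with -fsanitize=dataflow, origin tracking enabled, and the -dfsan-conditional-callbacks=true option mentioned in the header comments (main() and the taint source are illustrative):

#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

/* Fires on every conditional whose operands carry a taint label. */
static void on_tainted_branch(dfsan_label label, dfsan_origin origin) {
  fprintf(stderr, "tainted branch, label %u\n", (unsigned)label);
  dfsan_print_origin_id_trace(origin); /* new origin-id variant; only
                                          meaningful with origin tracking */
}

int main(void) {
  dfsan_set_conditional_callback(on_tainted_branch);
  int secret = 7;
  dfsan_set_label(1, &secret, sizeof(secret)); /* mark as tainted */
  if (secret > 0) /* condition depends on taint -> callback fires */
    puts("positive");
  return 0;
}
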
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/hwasan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/linux_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/lsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/msan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/msan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/scudo_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/scudo_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface_atomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h b/darwin-x86/lib64/clang/14.0.6/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h
rename to darwin-x86/lib64/clang/14.0.6/include/sanitizer/ubsan_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/serializeintrin.h b/darwin-x86/lib64/clang/14.0.6/include/serializeintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/serializeintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/serializeintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sgxintrin.h b/darwin-x86/lib64/clang/14.0.6/include/sgxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/sgxintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/sgxintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/shaintrin.h b/darwin-x86/lib64/clang/14.0.6/include/shaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/shaintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/smmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/smmintrin.h
index 710e55a..0df59c5 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/smmintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/smmintrin.h
@@ -668,7 +668,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi8 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+  return (__m128i) __builtin_elementwise_min((__v16qs) __V1, (__v16qs) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -687,7 +687,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi8 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+  return (__m128i) __builtin_elementwise_max((__v16qs) __V1, (__v16qs) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -706,7 +706,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu16 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+  return (__m128i) __builtin_elementwise_min((__v8hu) __V1, (__v8hu) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -725,7 +725,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu16 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+  return (__m128i) __builtin_elementwise_max((__v8hu) __V1, (__v8hu) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -744,7 +744,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_min((__v4si) __V1, (__v4si) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -763,7 +763,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_max((__v4si) __V1, (__v4si) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -782,7 +782,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_min((__v4su) __V1, (__v4su) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -801,7 +801,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_max((__v4su) __V1, (__v4su) __V2);
 }
 
 /* SSE4 Insertion and Extraction from XMM Register Instructions.  */
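
Note: the smmintrin.h rewrite replaces the target-specific __builtin_ia32_pmin*/pmax* builtins with the generic __builtin_elementwise_min/max, which infer signed vs. unsigned comparison from the vector element type; that is why the casts change from __v16qi/__v8hi to the explicitly signed __v16qs and unsigned __v8hu/__v4su. Observable semantics are unchanged; a minimal check of the signed behavior (compile with -msse4.1):

#include <smmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(-1);  /* 0xFF: -1 signed, 255 unsigned */
  __m128i b = _mm_set1_epi8(1);
  __m128i m = _mm_min_epi8(a, b); /* signed min, so every lane is -1 */
  printf("%d\n", (signed char)_mm_extract_epi8(m, 0)); /* prints -1 */
  return 0;
}
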
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdalign.h b/darwin-x86/lib64/clang/14.0.6/include/stdalign.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdalign.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdalign.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdarg.h b/darwin-x86/lib64/clang/14.0.6/include/stdarg.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdarg.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdarg.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdatomic.h b/darwin-x86/lib64/clang/14.0.6/include/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdatomic.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdbool.h b/darwin-x86/lib64/clang/14.0.6/include/stdbool.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdbool.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdbool.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stddef.h b/darwin-x86/lib64/clang/14.0.6/include/stddef.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stddef.h
rename to darwin-x86/lib64/clang/14.0.6/include/stddef.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdint.h b/darwin-x86/lib64/clang/14.0.6/include/stdint.h
similarity index 76%
rename from darwin-x86/lib64/clang/14.0.2/include/stdint.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdint.h
index 192f653..4790c25 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdint.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/stdint.h
@@ -461,6 +461,18 @@
 # define INT64_MAX           INT64_C( 9223372036854775807)
 # define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
 # define UINT64_MAX         UINT64_C(18446744073709551615)
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT64_WIDTH         64
+# define INT64_WIDTH          UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT64_WIDTH
+#endif /* __STDC_VERSION__ */
+
 # define __INT_LEAST64_MIN   INT64_MIN
 # define __INT_LEAST64_MAX   INT64_MAX
 # define __UINT_LEAST64_MAX UINT64_MAX
@@ -482,6 +494,15 @@
 # define INT_FAST64_MIN    __INT_LEAST64_MIN
 # define INT_FAST64_MAX    __INT_LEAST64_MAX
 # define UINT_FAST64_MAX  __UINT_LEAST64_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH  UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH  __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH   UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST64_MIN */
 
 
@@ -495,6 +516,7 @@
 # define INT_FAST56_MIN      INT56_MIN
 # define INT_FAST56_MAX      INT56_MAX
 # define UINT_FAST56_MAX    UINT56_MAX
+
 # define __INT_LEAST32_MIN   INT56_MIN
 # define __INT_LEAST32_MAX   INT56_MAX
 # define __UINT_LEAST32_MAX UINT56_MAX
@@ -504,6 +526,20 @@
 # define __INT_LEAST8_MIN    INT56_MIN
 # define __INT_LEAST8_MAX    INT56_MAX
 # define __UINT_LEAST8_MAX  UINT56_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT56_WIDTH         56
+# define INT56_WIDTH          UINT56_WIDTH
+# define UINT_LEAST56_WIDTH   UINT56_WIDTH
+# define INT_LEAST56_WIDTH    UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH    UINT56_WIDTH
+# define INT_FAST56_WIDTH     UINT_FAST56_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT56_TYPE__ */
 
 
@@ -517,6 +553,7 @@
 # define INT_FAST48_MIN      INT48_MIN
 # define INT_FAST48_MAX      INT48_MAX
 # define UINT_FAST48_MAX    UINT48_MAX
+
 # define __INT_LEAST32_MIN   INT48_MIN
 # define __INT_LEAST32_MAX   INT48_MAX
 # define __UINT_LEAST32_MAX UINT48_MAX
@@ -526,6 +563,20 @@
 # define __INT_LEAST8_MIN    INT48_MIN
 # define __INT_LEAST8_MAX    INT48_MAX
 # define __UINT_LEAST8_MAX  UINT48_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define UINT48_WIDTH         48
+#define INT48_WIDTH          UINT48_WIDTH
+#define UINT_LEAST48_WIDTH   UINT48_WIDTH
+#define INT_LEAST48_WIDTH    UINT_LEAST48_WIDTH
+#define UINT_FAST48_WIDTH    UINT48_WIDTH
+#define INT_FAST48_WIDTH     UINT_FAST48_WIDTH
+#define __UINT_LEAST32_WIDTH UINT48_WIDTH
+#define __UINT_LEAST16_WIDTH UINT48_WIDTH
+#define __UINT_LEAST8_WIDTH  UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT48_TYPE__ */
 
 
@@ -539,6 +590,7 @@
 # define INT_FAST40_MIN      INT40_MIN
 # define INT_FAST40_MAX      INT40_MAX
 # define UINT_FAST40_MAX    UINT40_MAX
+
 # define __INT_LEAST32_MIN   INT40_MIN
 # define __INT_LEAST32_MAX   INT40_MAX
 # define __UINT_LEAST32_MAX UINT40_MAX
@@ -548,6 +600,20 @@
 # define __INT_LEAST8_MIN    INT40_MIN
 # define __INT_LEAST8_MAX    INT40_MAX
 # define __UINT_LEAST8_MAX  UINT40_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT40_WIDTH         40
+# define INT40_WIDTH          UINT40_WIDTH
+# define UINT_LEAST40_WIDTH   UINT40_WIDTH
+# define INT_LEAST40_WIDTH    UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH    UINT40_WIDTH
+# define INT_FAST40_WIDTH     UINT_FAST40_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT40_TYPE__ */
 
 
@@ -555,6 +621,7 @@
 # define INT32_MAX           INT32_C(2147483647)
 # define INT32_MIN         (-INT32_C(2147483647)-1)
 # define UINT32_MAX         UINT32_C(4294967295)
+
 # define __INT_LEAST32_MIN   INT32_MIN
 # define __INT_LEAST32_MAX   INT32_MAX
 # define __UINT_LEAST32_MAX UINT32_MAX
@@ -564,6 +631,16 @@
 # define __INT_LEAST8_MIN    INT32_MIN
 # define __INT_LEAST8_MAX    INT32_MAX
 # define __UINT_LEAST8_MAX  UINT32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT32_WIDTH         32
+# define INT32_WIDTH          UINT32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT32_TYPE__ */
 
 #ifdef __INT_LEAST32_MIN
@@ -573,6 +650,15 @@
 # define INT_FAST32_MIN    __INT_LEAST32_MIN
 # define INT_FAST32_MAX    __INT_LEAST32_MAX
 # define UINT_FAST32_MAX  __UINT_LEAST32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH  UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH  __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH   UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST32_MIN */
 
 
@@ -586,12 +672,26 @@
 # define INT_FAST24_MIN      INT24_MIN
 # define INT_FAST24_MAX      INT24_MAX
 # define UINT_FAST24_MAX    UINT24_MAX
+
 # define __INT_LEAST16_MIN   INT24_MIN
 # define __INT_LEAST16_MAX   INT24_MAX
 # define __UINT_LEAST16_MAX UINT24_MAX
 # define __INT_LEAST8_MIN    INT24_MIN
 # define __INT_LEAST8_MAX    INT24_MAX
 # define __UINT_LEAST8_MAX  UINT24_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT24_WIDTH         24
+# define INT24_WIDTH          UINT24_WIDTH
+# define UINT_LEAST24_WIDTH   UINT24_WIDTH
+# define INT_LEAST24_WIDTH    UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH    UINT24_WIDTH
+# define INT_FAST24_WIDTH     UINT_FAST24_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT24_TYPE__ */
 
 
@@ -599,12 +699,22 @@
 #define INT16_MAX            INT16_C(32767)
 #define INT16_MIN          (-INT16_C(32767)-1)
 #define UINT16_MAX          UINT16_C(65535)
+
 # define __INT_LEAST16_MIN   INT16_MIN
 # define __INT_LEAST16_MAX   INT16_MAX
 # define __UINT_LEAST16_MAX UINT16_MAX
 # define __INT_LEAST8_MIN    INT16_MIN
 # define __INT_LEAST8_MAX    INT16_MAX
 # define __UINT_LEAST8_MAX  UINT16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT16_WIDTH         16
+# define INT16_WIDTH          UINT16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT16_TYPE__ */
 
 #ifdef __INT_LEAST16_MIN
@@ -614,6 +724,15 @@
 # define INT_FAST16_MIN    __INT_LEAST16_MIN
 # define INT_FAST16_MAX    __INT_LEAST16_MAX
 # define UINT_FAST16_MAX  __UINT_LEAST16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH  UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH  __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH   UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST16_MIN */
 
 
@@ -621,9 +740,18 @@
 # define INT8_MAX            INT8_C(127)
 # define INT8_MIN          (-INT8_C(127)-1)
 # define UINT8_MAX          UINT8_C(255)
+
 # define __INT_LEAST8_MIN    INT8_MIN
 # define __INT_LEAST8_MAX    INT8_MAX
 # define __UINT_LEAST8_MAX  UINT8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT8_WIDTH         8
+# define INT8_WIDTH          UINT8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT8_TYPE__ */
 
 #ifdef __INT_LEAST8_MIN
@@ -633,6 +761,15 @@
 # define INT_FAST8_MIN    __INT_LEAST8_MIN
 # define INT_FAST8_MAX    __INT_LEAST8_MAX
 # define UINT_FAST8_MAX  __UINT_LEAST8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH  UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH  __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH   UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST8_MIN */
 
 /* Some utility macros */
@@ -652,6 +789,16 @@
 #define PTRDIFF_MAX   __PTRDIFF_MAX__
 #define    SIZE_MAX      __SIZE_MAX__
 
+/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTPTR_WIDTH  __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
 /* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
  * is enabled. */
 #if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +810,16 @@
 #define  INTMAX_MAX   __INTMAX_MAX__
 #define UINTMAX_MAX  __UINTMAX_MAX__
 
+/* C2x 7.20.2.5 Width of greatest-width integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
 /* C99 7.18.3 Limits of other integer types. */
 #define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
 #define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +846,16 @@
 #define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
 #define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
 
+/* C2x 7.20.3.x Width of other integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define PTRDIFF_WIDTH    __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH       __SIZE_WIDTH__
+#define WCHAR_WIDTH      __WCHAR_WIDTH__
+#define WINT_WIDTH       __WINT_WIDTH__
+#endif
+
 #endif /* __STDC_HOSTED__ */
 #endif /* __CLANG_STDINT_H */
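
Note: all of the stdint.h hunks above add the C2x *_WIDTH macros behind the placeholder __STDC_VERSION__ >= 202000L check that the FIXME comments call out. A minimal sketch of what becomes available when compiling with -std=c2x:

#include <stdint.h>
#include <stdio.h>

int main(void) {
#if __STDC_VERSION__ >= 202000L
  /* Width macros are defined alongside the existing MIN/MAX macros. */
  _Static_assert(UINT8_WIDTH == 8, "uint8_t is exactly 8 bits");
  printf("int32: %d bits, intptr: %d bits, intmax: %d bits\n",
         INT32_WIDTH, INTPTR_WIDTH, INTMAX_WIDTH);
#endif
  return 0;
}
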
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdnoreturn.h b/darwin-x86/lib64/clang/14.0.6/include/stdnoreturn.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/stdnoreturn.h
rename to darwin-x86/lib64/clang/14.0.6/include/stdnoreturn.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tbmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/tbmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tbmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/tbmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tgmath.h b/darwin-x86/lib64/clang/14.0.6/include/tgmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tgmath.h
rename to darwin-x86/lib64/clang/14.0.6/include/tgmath.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/tmmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/tmmintrin.h
index bcffa81..cb9be23 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/tmmintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/tmmintrin.h
@@ -53,7 +53,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi8(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
 }
 
 /// Computes the absolute value of each of the packed 16-bit signed
@@ -89,7 +89,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi16(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
 }
 
 /// Computes the absolute value of each of the packed 32-bit signed
@@ -125,7 +125,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi32(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+    return (__m128i)__builtin_elementwise_abs((__v4si)__a);
 }
 
 /// Concatenates the two 128-bit integer vector operands, and
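
Note: tmmintrin.h gets the same treatment as smmintrin.h above: the pabs* builtins become the generic __builtin_elementwise_abs, with the __v16qs cast again making the signed element type explicit. A one-line check (compile with -mssse3):

#include <tmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_abs_epi8(_mm_set1_epi8(-7));
  printf("%d\n", ((signed char *)&a)[0]); /* prints 7 */
  return 0;
}
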
diff --git a/darwin-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h b/darwin-x86/lib64/clang/14.0.6/include/tsxldtrkintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/tsxldtrkintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/uintrintrin.h b/darwin-x86/lib64/clang/14.0.6/include/uintrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/uintrintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/uintrintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/unwind.h b/darwin-x86/lib64/clang/14.0.6/include/unwind.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/unwind.h
rename to darwin-x86/lib64/clang/14.0.6/include/unwind.h
index 029524b..6e06979 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/unwind.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/unwind.h
@@ -172,7 +172,8 @@
   _UVRSC_CORE = 0,        /* integer register */
   _UVRSC_VFP = 1,         /* vfp */
   _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
+  _UVRSC_WMMXC = 4,       /* Intel WMMX control register */
+  _UVRSC_PSEUDO = 5       /* Special purpose pseudo register */
 } _Unwind_VRS_RegClass;
 
 typedef enum {
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vadefs.h b/darwin-x86/lib64/clang/14.0.6/include/vadefs.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vadefs.h
rename to darwin-x86/lib64/clang/14.0.6/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h b/darwin-x86/lib64/clang/14.0.6/include/vaesintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/vaesintrin.h
index f3c0807..294dcff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ b/darwin-x86/lib64/clang/14.0.6/include/vaesintrin.h
@@ -82,4 +82,4 @@
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_F
 
-#endif
+#endif // __VAESINTRIN_H
diff --git a/darwin-x86/lib64/clang/14.0.2/include/varargs.h b/darwin-x86/lib64/clang/14.0.6/include/varargs.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/varargs.h
rename to darwin-x86/lib64/clang/14.0.6/include/varargs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vecintrin.h b/darwin-x86/lib64/clang/14.0.6/include/vecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vecintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/vecintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h b/darwin-x86/lib64/clang/14.0.6/include/vpclmulqdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/vpclmulqdqintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/waitpkgintrin.h b/darwin-x86/lib64/clang/14.0.6/include/waitpkgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/waitpkgintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h b/darwin-x86/lib64/clang/14.0.6/include/wasm_simd128.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/wasm_simd128.h
rename to darwin-x86/lib64/clang/14.0.6/include/wasm_simd128.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h b/darwin-x86/lib64/clang/14.0.6/include/wbnoinvdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/wbnoinvdintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/wmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/wmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/wmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/wmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/x86gprintrin.h b/darwin-x86/lib64/clang/14.0.6/include/x86gprintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/x86gprintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/x86gprintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/x86intrin.h b/darwin-x86/lib64/clang/14.0.6/include/x86intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/x86intrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/x86intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xmmintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xopintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xopintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xopintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xopintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsavecintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xsavecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsavecintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xsavecintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsaveintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xsaveintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsaveintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xsaveintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xsaveoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xsaveoptintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xsavesintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xsavesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xsavesintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xsavesintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/xtestintrin.h b/darwin-x86/lib64/clang/14.0.6/include/xtestintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/include/xtestintrin.h
rename to darwin-x86/lib64/clang/14.0.6/include/xtestintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt b/darwin-x86/lib64/clang/14.0.6/share/asan_ignorelist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt
rename to darwin-x86/lib64/clang/14.0.6/share/asan_ignorelist.txt
diff --git a/darwin-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt b/darwin-x86/lib64/clang/14.0.6/share/cfi_ignorelist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt
rename to darwin-x86/lib64/clang/14.0.6/share/cfi_ignorelist.txt
diff --git a/darwin-x86/lib64/libbase.dylib b/darwin-x86/lib64/libbase.dylib
index 4b405c7..7d6e2e2 100755
--- a/darwin-x86/lib64/libbase.dylib
+++ b/darwin-x86/lib64/libbase.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.1.dylib b/darwin-x86/lib64/libc++.1.dylib
index ffda1a3..f4ecacc 100755
--- a/darwin-x86/lib64/libc++.1.dylib
+++ b/darwin-x86/lib64/libc++.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.dylib b/darwin-x86/lib64/libc++.dylib
index 9121ca8..345bdd1 100755
--- a/darwin-x86/lib64/libc++.dylib
+++ b/darwin-x86/lib64/libc++.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++abi.1.dylib b/darwin-x86/lib64/libc++abi.1.dylib
index b47ac58..2454bb8 100755
--- a/darwin-x86/lib64/libc++abi.1.dylib
+++ b/darwin-x86/lib64/libc++abi.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libclang-cpp.dylib b/darwin-x86/lib64/libclang-cpp.dylib
index e7e46d7..4995f53 100755
--- a/darwin-x86/lib64/libclang-cpp.dylib
+++ b/darwin-x86/lib64/libclang-cpp.dylib
Binary files differ
diff --git a/darwin-x86/lib64/liblog.dylib b/darwin-x86/lib64/liblog.dylib
index 9f0f1b1..f031387 100755
--- a/darwin-x86/lib64/liblog.dylib
+++ b/darwin-x86/lib64/liblog.dylib
Binary files differ
diff --git a/linux-x86/bin/bindgen b/linux-x86/bin/bindgen
index bc0ef1d..38d94cf 100755
--- a/linux-x86/bin/bindgen
+++ b/linux-x86/bin/bindgen
Binary files differ
diff --git a/linux-x86/bin/cxx_extractor b/linux-x86/bin/cxx_extractor
index 7ab8dff..0447389 100755
--- a/linux-x86/bin/cxx_extractor
+++ b/linux-x86/bin/cxx_extractor
Binary files differ
diff --git a/linux-x86/bin/header-abi-diff b/linux-x86/bin/header-abi-diff
index 30ce9d3..8fe710d 100755
--- a/linux-x86/bin/header-abi-diff
+++ b/linux-x86/bin/header-abi-diff
Binary files differ
diff --git a/linux-x86/bin/header-abi-dumper b/linux-x86/bin/header-abi-dumper
index 30f803a..93240a0 100755
--- a/linux-x86/bin/header-abi-dumper
+++ b/linux-x86/bin/header-abi-dumper
Binary files differ
diff --git a/linux-x86/bin/header-abi-linker b/linux-x86/bin/header-abi-linker
index 521954a..43e16c4 100755
--- a/linux-x86/bin/header-abi-linker
+++ b/linux-x86/bin/header-abi-linker
Binary files differ
diff --git a/linux-x86/bin/proto_metadata_plugin b/linux-x86/bin/proto_metadata_plugin
index c2f2579..316a2d2 100755
--- a/linux-x86/bin/proto_metadata_plugin
+++ b/linux-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/linux-x86/bin/protoc_extractor b/linux-x86/bin/protoc_extractor
index 7217d3c..35e7d21 100755
--- a/linux-x86/bin/protoc_extractor
+++ b/linux-x86/bin/protoc_extractor
Binary files differ
diff --git a/linux-x86/bin/versioner b/linux-x86/bin/versioner
index 98a5765..33157b7 100755
--- a/linux-x86/bin/versioner
+++ b/linux-x86/bin/versioner
Binary files differ
diff --git a/linux-x86/clang-headers b/linux-x86/clang-headers
index fabd016..161b83d 120000
--- a/linux-x86/clang-headers
+++ b/linux-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/14.0.2/include
\ No newline at end of file
+lib64/clang/14.0.6/include
\ No newline at end of file
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
deleted file mode 100644
index 538556f..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ /dev/null
@@ -1,348 +0,0 @@
-/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_MATH_H__
-#define __CLANG_CUDA_MATH_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-#ifndef __OPENMP_NVPTX__
-#if CUDA_VERSION < 9000
-#error This file is intended to be used with CUDA-9+ only.
-#endif
-#endif
-
-// __DEVICE__ is a helper macro with common set of attributes for the wrappers
-// we implement in this file. We need static in order to avoid emitting unused
-// functions and __forceinline__ helps inlining these wrappers at -O1.
-#pragma push_macro("__DEVICE__")
-#ifdef __OPENMP_NVPTX__
-#if defined(__cplusplus)
-#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE__ static __attribute__((always_inline, nothrow))
-#endif
-#else
-#define __DEVICE__ static __device__ __forceinline__
-#endif
-
-// Specialized version of __DEVICE__ for functions with void return type. Needed
-// because the OpenMP overlay requires constexpr functions here but prior to
-// c++14 void return functions could not be constexpr.
-#pragma push_macro("__DEVICE_VOID__")
-#ifdef __OPENMP_NVPTX__ && defined(__cplusplus) && __cplusplus < 201402L
-#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE_VOID__ __DEVICE__
-#endif
-
-// libdevice provides fast low precision and slow full-precision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) { return __nv_llabs(__a); }
-#else
-__DEVICE__ long labs(long __a) { return __nv_abs(__a); }
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-// __nv_rint* in libdevice is buggy and produces incorrect results.
-__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__DEVICE_VOID__")
-#pragma pop_macro("__FAST_OR_SLOW")
-
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
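Every function in the deleted CUDA math-wrapper hunk above follows the same shape: a thin __DEVICE__ wrapper that forwards a standard libm name to the matching __nv_* libdevice intrinsic, with __FAST_OR_SLOW selecting the __nv_fast_* variant under fast-math. A minimal sketch of that pattern, using simplified stand-ins for __DEVICE__ and __FAST_OR_SLOW (the real header builds its versions with #pragma push_macro and pops them at the end of the hunk, as shown above):

  // Sketch only: simplified stand-ins for the header's private macros.
  #define __DEVICE__ static __device__ __attribute__((always_inline))
  #ifdef __FAST_MATH__
  #define __FAST_OR_SLOW(fast, slow) fast  // fast-math: faster, less accurate
  #else
  #define __FAST_OR_SLOW(fast, slow) slow  // default: regular libdevice call
  #endif

  // libdevice entry points, resolved against the NVVM bitcode library.
  extern "C" __device__ float __nv_cosf(float);
  extern "C" __device__ float __nv_fast_cosf(float);

  // Device code gets the standard name; the wrapper inlines away entirely.
  __DEVICE__ float cosf(float __a) {
    return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
  }

Because every wrapper is a single forwarding call, the header adds names without adding call overhead.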
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
deleted file mode 100644
index 73021d2..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/*
- * WARNING: This header is intended to be directly -include'd by
- * the compiler and is not supposed to be included by users.
- *
- */
-
-#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
-#define __CLANG_HIP_RUNTIME_WRAPPER_H__
-
-#if __HIP__
-
-#define __host__ __attribute__((host))
-#define __device__ __attribute__((device))
-#define __global__ __attribute__((global))
-#define __shared__ __attribute__((shared))
-#define __constant__ __attribute__((constant))
-#define __managed__ __attribute__((managed))
-
-#if !defined(__cplusplus) || __cplusplus < 201103L
-  #define nullptr NULL
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-  __attribute__((__visibility__("default")))
-  __attribute__((weak))
-  __attribute__((noreturn))
-  __device__ void __cxa_pure_virtual(void) {
-    __builtin_trap();
-  }
-  __attribute__((__visibility__("default")))
-  __attribute__((weak))
-  __attribute__((noreturn))
-  __device__ void __cxa_deleted_virtual(void) {
-    __builtin_trap();
-  }
-}
-#endif //__cplusplus
-
-#if !defined(__HIPCC_RTC__)
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-#else
-typedef __SIZE_TYPE__ size_t;
-// Define macros which are needed to declare HIP device APIs without standard
-// C/C++ headers. This is for readability, so that these APIs can be written
-// the same way as in the non-hipRTC case. These macros need to be popped so
-// that they do not pollute users' namespace.
-#pragma push_macro("NULL")
-#pragma push_macro("uint32_t")
-#pragma push_macro("uint64_t")
-#pragma push_macro("CHAR_BIT")
-#pragma push_macro("INT_MAX")
-#define NULL (void *)0
-#define uint32_t __UINT32_TYPE__
-#define uint64_t __UINT64_TYPE__
-#define CHAR_BIT __CHAR_BIT__
-#define INT_MAX __INT_MAX__
-#endif // __HIPCC_RTC__
-
-typedef __SIZE_TYPE__ __hip_size_t;
-
-#ifdef __cplusplus
-extern "C" {
-#endif //__cplusplus
-
-#if __HIP_ENABLE_DEVICE_MALLOC__
-__device__ void *__hip_malloc(__hip_size_t __size);
-__device__ void *__hip_free(void *__ptr);
-__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
-  return __hip_malloc(__size);
-}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
-}
-#else
-__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
-  __builtin_trap();
-  return (void *)0;
-}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  __builtin_trap();
-  return (void *)0;
-}
-#endif
-
-#ifdef __cplusplus
-} // extern "C"
-#endif //__cplusplus
-
-#include <__clang_hip_libdevice_declares.h>
-#include <__clang_hip_math.h>
-
-#if defined(__HIPCC_RTC__)
-#include <__clang_hip_cmath.h>
-#else
-#include <__clang_cuda_math_forward_declares.h>
-#include <__clang_hip_cmath.h>
-#include <__clang_cuda_complex_builtins.h>
-#include <algorithm>
-#include <complex>
-#include <new>
-#endif // __HIPCC_RTC__
-
-#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
-#if defined(__HIPCC_RTC__)
-#pragma pop_macro("NULL")
-#pragma pop_macro("uint32_t")
-#pragma pop_macro("uint64_t")
-#pragma pop_macro("CHAR_BIT")
-#pragma pop_macro("INT_MAX")
-#endif // __HIPCC_RTC__
-#endif // __HIP__
-#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
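The deleted __clang_hip_runtime_wrapper.h above shows the matching discipline for hipRTC: with no standard headers available under __HIPCC_RTC__, the wrapper saves NULL, uint32_t, uint64_t, CHAR_BIT, and INT_MAX with #pragma push_macro, defines private versions for its own declarations, and restores the user's definitions with #pragma pop_macro once it is done. A minimal sketch of the idiom for a single macro (nothing here beyond the standard preprocessor):

  #pragma push_macro("INT_MAX")  // save whatever definition the user had
  #define INT_MAX __INT_MAX__    // private definition for this header only
  // ... declarations that need INT_MAX go here ...
  #pragma pop_macro("INT_MAX")   // restore the saved definition

The same file also gives device code weak, always-inline malloc/free: forwarding to __hip_malloc/__hip_free when __HIP_ENABLE_DEVICE_MALLOC__ is set, and trapping otherwise, so a stray device-side allocation fails loudly instead of silently linking to nothing.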
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_neon.h b/linux-x86/lib64/clang/14.0.2/include/arm_neon.h
deleted file mode 100644
index 2448870..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ /dev/null
@@ -1,69894 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#ifndef __ARM_FP
-#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
-#else
-
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#else
-
-#include <stdint.h>
-
-#ifdef __ARM_FEATURE_BF16
-#include <arm_bf16.h>
-typedef __bf16 bfloat16_t;
-#endif
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-typedef __uint128_t poly128_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-typedef int64_t poly64_t;
-#endif
-typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#ifdef __ARM_FEATURE_BF16
-typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t;
-typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t;
-
-typedef struct bfloat16x4x2_t {
-  bfloat16x4_t val[2];
-} bfloat16x4x2_t;
-
-typedef struct bfloat16x8x2_t {
-  bfloat16x8_t val[2];
-} bfloat16x8x2_t;
-
-typedef struct bfloat16x4x3_t {
-  bfloat16x4_t val[3];
-} bfloat16x4x3_t;
-
-typedef struct bfloat16x8x3_t {
-  bfloat16x8_t val[3];
-} bfloat16x8x3_t;
-
-typedef struct bfloat16x4x4_t {
-  bfloat16x4_t val[4];
-} bfloat16x4x4_t;
-
-typedef struct bfloat16x8x4_t {
-  bfloat16x8_t val[4];
-} bfloat16x8x4_t;
-
-#endif
-
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
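From here on, each deleted arm_neon.h intrinsic comes in up to three spellings: a plain little-endian body, a big-endian body that reverses the input lanes with __builtin_shufflevector before calling the builtin and reverses the result after, and a __noswap_ form for use inside other big-endian bodies whose operands are already reversed. A toy illustration of the reversing shuffle itself, written with generic clang vector extensions rather than the NEON types (an assumption for the sketch; the real macros operate on neon_vector_type values):

  // Toy example: reverse the eight lanes of a 64-bit vector.
  typedef __attribute__((__vector_size__(8))) signed char v8i8;

  v8i8 reverse_lanes(v8i8 __v) {
    // Indices {7,...,0} pick the lanes of __v back to front, mapping the
    // big-endian in-register layout onto the lane numbering the
    // architecture-neutral builtins expect.
    return __builtin_shufflevector(__v, __v, 7, 6, 5, 4, 3, 2, 1, 0);
  }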
-
-#define splat_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#else
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#else
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#else
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#else
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
-  poly8x8_t __s0_0 = __p0_0; \
-  poly8x8_t __ret_0; \
-  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
-  __ret_0; \
-})
-#else
-#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
-  poly8x8_t __s0_1 = __p0_1; \
-  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_1; \
-  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
-  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_1; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
-  poly16x4_t __s0_2 = __p0_2; \
-  poly16x4_t __ret_2; \
-  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
-  __ret_2; \
-})
-#else
-#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
-  poly16x4_t __s0_3 = __p0_3; \
-  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
-  poly16x4_t __ret_3; \
-  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
-  __ret_3; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
-  poly8x8_t __s0_4 = __p0_4; \
-  poly8x16_t __ret_4; \
-  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
-  __ret_4; \
-})
-#else
-#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
-  poly8x8_t __s0_5 = __p0_5; \
-  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_5; \
-  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_5; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
-  poly16x4_t __s0_6 = __p0_6; \
-  poly16x8_t __ret_6; \
-  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
-  __ret_6; \
-})
-#else
-#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
-  poly16x4_t __s0_7 = __p0_7; \
-  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
-  poly16x8_t __ret_7; \
-  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
-  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_7; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
-  uint8x8_t __s0_8 = __p0_8; \
-  uint8x16_t __ret_8; \
-  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
-  __ret_8; \
-})
-#else
-#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
-  uint8x8_t __s0_9 = __p0_9; \
-  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_9; \
-  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
-  uint32x2_t __s0_10 = __p0_10; \
-  uint32x4_t __ret_10; \
-  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
-  __ret_10; \
-})
-#else
-#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
-  uint32x2_t __s0_11 = __p0_11; \
-  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
-  uint32x4_t __ret_11; \
-  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
-  uint64x1_t __s0_12 = __p0_12; \
-  uint64x2_t __ret_12; \
-  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
-  __ret_12; \
-})
-#else
-#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
-  uint64x1_t __s0_13 = __p0_13; \
-  uint64x2_t __ret_13; \
-  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
-  __ret_13; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
-  uint16x4_t __s0_14 = __p0_14; \
-  uint16x8_t __ret_14; \
-  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
-  __ret_14; \
-})
-#else
-#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
-  uint16x4_t __s0_15 = __p0_15; \
-  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
-  uint16x8_t __ret_15; \
-  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
-  int8x8_t __s0_16 = __p0_16; \
-  int8x16_t __ret_16; \
-  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
-  __ret_16; \
-})
-#else
-#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
-  int8x8_t __s0_17 = __p0_17; \
-  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_17; \
-  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
-  float32x2_t __s0_18 = __p0_18; \
-  float32x4_t __ret_18; \
-  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
-  __ret_18; \
-})
-#else
-#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
-  float32x2_t __s0_19 = __p0_19; \
-  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
-  float32x4_t __ret_19; \
-  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
-  int32x2_t __s0_20 = __p0_20; \
-  int32x4_t __ret_20; \
-  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
-  __ret_20; \
-})
-#else
-#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
-  int32x2_t __s0_21 = __p0_21; \
-  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  int32x4_t __ret_21; \
-  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
-  __ret_21; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
-  int64x1_t __s0_22 = __p0_22; \
-  int64x2_t __ret_22; \
-  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
-  __ret_22; \
-})
-#else
-#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
-  int64x1_t __s0_23 = __p0_23; \
-  int64x2_t __ret_23; \
-  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
-  __ret_23; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
-  int16x4_t __s0_24 = __p0_24; \
-  int16x8_t __ret_24; \
-  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
-  __ret_24; \
-})
-#else
-#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
-  int16x4_t __s0_25 = __p0_25; \
-  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
-  int16x8_t __ret_25; \
-  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
-  uint8x8_t __s0_26 = __p0_26; \
-  uint8x8_t __ret_26; \
-  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
-  __ret_26; \
-})
-#else
-#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
-  uint8x8_t __s0_27 = __p0_27; \
-  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_27; \
-  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_27; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
-  uint32x2_t __s0_28 = __p0_28; \
-  uint32x2_t __ret_28; \
-  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
-  __ret_28; \
-})
-#else
-#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
-  uint32x2_t __s0_29 = __p0_29; \
-  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
-  uint32x2_t __ret_29; \
-  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
-  __ret_29; \
-})
-#endif
-
-#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
-  uint64x1_t __s0_30 = __p0_30; \
-  uint64x1_t __ret_30; \
-  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
-  __ret_30; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
-  uint16x4_t __s0_31 = __p0_31; \
-  uint16x4_t __ret_31; \
-  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
-  __ret_31; \
-})
-#else
-#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
-  uint16x4_t __s0_32 = __p0_32; \
-  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
-  uint16x4_t __ret_32; \
-  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
-  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
-  __ret_32; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
-  int8x8_t __s0_33 = __p0_33; \
-  int8x8_t __ret_33; \
-  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
-  __ret_33; \
-})
-#else
-#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
-  int8x8_t __s0_34 = __p0_34; \
-  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_34; \
-  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
-  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_34; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
-  float32x2_t __s0_35 = __p0_35; \
-  float32x2_t __ret_35; \
-  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
-  __ret_35; \
-})
-#else
-#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
-  float32x2_t __s0_36 = __p0_36; \
-  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
-  float32x2_t __ret_36; \
-  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
-  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
-  __ret_36; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
-  int32x2_t __s0_37 = __p0_37; \
-  int32x2_t __ret_37; \
-  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
-  __ret_37; \
-})
-#else
-#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
-  int32x2_t __s0_38 = __p0_38; \
-  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
-  int32x2_t __ret_38; \
-  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
-  __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
-  __ret_38; \
-})
-#endif
-
-#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
-  int64x1_t __s0_39 = __p0_39; \
-  int64x1_t __ret_39; \
-  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
-  __ret_39; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
-  int16x4_t __s0_40 = __p0_40; \
-  int16x4_t __ret_40; \
-  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
-  __ret_40; \
-})
-#else
-#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
-  int16x4_t __s0_41 = __p0_41; \
-  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
-  int16x4_t __ret_41; \
-  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
-  __ret_41; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vdup_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x2(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x2(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x3(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x3(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x4(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x4(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
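The vmla* family is a lane-wise multiply-accumulate, __p0 + __p1 * __p2, as the bodies above show. A usage sketch, assuming <arm_neon.h> on an ARM target:

  #include <arm_neon.h>

  /* acc + x * y, element-wise across all four float lanes */
  float32x4_t axpy4(float32x4_t acc, float32x4_t x, float32x4_t y) {
    return vmlaq_f32(acc, x, y);
  }
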
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint32x4_t __s0_42 = __p0_42; \
-  uint32x4_t __s1_42 = __p1_42; \
-  uint32x2_t __s2_42 = __p2_42; \
-  uint32x4_t __ret_42; \
-  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
-  __ret_42; \
-})
-#else
-#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint32x4_t __s0_43 = __p0_43; \
-  uint32x4_t __s1_43 = __p1_43; \
-  uint32x2_t __s2_43 = __p2_43; \
-  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
-  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
-  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
-  uint32x4_t __ret_43; \
-  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
-  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
-  __ret_43; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x8_t __s0_44 = __p0_44; \
-  uint16x8_t __s1_44 = __p1_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x8_t __ret_44; \
-  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
-  __ret_44; \
-})
-#else
-#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  uint16x8_t __s0_45 = __p0_45; \
-  uint16x8_t __s1_45 = __p1_45; \
-  uint16x4_t __s2_45 = __p2_45; \
-  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
-  uint16x8_t __ret_45; \
-  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
-  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_45; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  float32x4_t __s0_46 = __p0_46; \
-  float32x4_t __s1_46 = __p1_46; \
-  float32x2_t __s2_46 = __p2_46; \
-  float32x4_t __ret_46; \
-  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
-  __ret_46; \
-})
-#else
-#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x4_t __s0_47 = __p0_47; \
-  float32x4_t __s1_47 = __p1_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
-  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
-  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
-  float32x4_t __ret_47; \
-  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
-  __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
-  __ret_47; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  int32x4_t __s0_48 = __p0_48; \
-  int32x4_t __s1_48 = __p1_48; \
-  int32x2_t __s2_48 = __p2_48; \
-  int32x4_t __ret_48; \
-  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
-  __ret_48; \
-})
-#else
-#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x4_t __s0_49 = __p0_49; \
-  int32x4_t __s1_49 = __p1_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
-  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
-  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
-  int32x4_t __ret_49; \
-  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
-  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
-  __ret_49; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int16x8_t __s0_50 = __p0_50; \
-  int16x8_t __s1_50 = __p1_50; \
-  int16x4_t __s2_50 = __p2_50; \
-  int16x8_t __ret_50; \
-  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
-  __ret_50; \
-})
-#else
-#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int16x8_t __s0_51 = __p0_51; \
-  int16x8_t __s1_51 = __p1_51; \
-  int16x4_t __s2_51 = __p2_51; \
-  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
-  int16x8_t __ret_51; \
-  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
-  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_51; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  uint32x2_t __s0_52 = __p0_52; \
-  uint32x2_t __s1_52 = __p1_52; \
-  uint32x2_t __s2_52 = __p2_52; \
-  uint32x2_t __ret_52; \
-  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
-  __ret_52; \
-})
-#else
-#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  uint32x2_t __s0_53 = __p0_53; \
-  uint32x2_t __s1_53 = __p1_53; \
-  uint32x2_t __s2_53 = __p2_53; \
-  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
-  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
-  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
-  uint32x2_t __ret_53; \
-  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
-  __ret_53; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  uint16x4_t __s0_54 = __p0_54; \
-  uint16x4_t __s1_54 = __p1_54; \
-  uint16x4_t __s2_54 = __p2_54; \
-  uint16x4_t __ret_54; \
-  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
-  __ret_54; \
-})
-#else
-#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  uint16x4_t __s0_55 = __p0_55; \
-  uint16x4_t __s1_55 = __p1_55; \
-  uint16x4_t __s2_55 = __p2_55; \
-  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
-  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
-  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
-  uint16x4_t __ret_55; \
-  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
-  __ret_55; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  float32x2_t __s0_56 = __p0_56; \
-  float32x2_t __s1_56 = __p1_56; \
-  float32x2_t __s2_56 = __p2_56; \
-  float32x2_t __ret_56; \
-  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
-  __ret_56; \
-})
-#else
-#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  float32x2_t __s0_57 = __p0_57; \
-  float32x2_t __s1_57 = __p1_57; \
-  float32x2_t __s2_57 = __p2_57; \
-  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
-  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
-  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
-  float32x2_t __ret_57; \
-  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
-  __ret_57; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  int32x2_t __s0_58 = __p0_58; \
-  int32x2_t __s1_58 = __p1_58; \
-  int32x2_t __s2_58 = __p2_58; \
-  int32x2_t __ret_58; \
-  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
-  __ret_58; \
-})
-#else
-#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  int32x2_t __s0_59 = __p0_59; \
-  int32x2_t __s1_59 = __p1_59; \
-  int32x2_t __s2_59 = __p2_59; \
-  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
-  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
-  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
-  int32x2_t __ret_59; \
-  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
-  __ret_59; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  int16x4_t __s0_60 = __p0_60; \
-  int16x4_t __s1_60 = __p1_60; \
-  int16x4_t __s2_60 = __p2_60; \
-  int16x4_t __ret_60; \
-  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
-  __ret_60; \
-})
-#else
-#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  int16x4_t __s0_61 = __p0_61; \
-  int16x4_t __s1_61 = __p1_61; \
-  int16x4_t __s2_61 = __p2_61; \
-  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
-  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  int16x4_t __ret_61; \
-  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
-})
-#endif
-
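The _lane variants above multiply by a single lane of the third operand, splatted across the vector; the lane index must be a constant expression. A sketch using lane 0:

  #include <arm_neon.h>

  /* acc + x * c[0]; the lane index must be a compile-time constant */
  float32x4_t mla_lane0(float32x4_t acc, float32x4_t x, float32x2_t c) {
    return vmlaq_lane_f32(acc, x, c, 0);
  }
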
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
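The _n variants broadcast a scalar into every lane before the multiply-accumulate, as the (float32x4_t) {__p2, __p2, __p2, __p2} initializers above show. A sketch:

  #include <arm_neon.h>

  /* acc + x * alpha in every lane; alpha is splatted internally */
  float32x4_t saxpy4(float32x4_t acc, float32x4_t x, float32_t alpha) {
    return vmlaq_n_f32(acc, x, alpha);
  }
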
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
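vmls* mirrors vmla* with a multiply-subtract, __p0 - __p1 * __p2. A sketch:

  #include <arm_neon.h>

  /* acc - x * y, element-wise */
  float32x4_t mls4(float32x4_t acc, float32x4_t x, float32x4_t y) {
    return vmlsq_f32(acc, x, y);
  }
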
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint32x4_t __s0_62 = __p0_62; \
-  uint32x4_t __s1_62 = __p1_62; \
-  uint32x2_t __s2_62 = __p2_62; \
-  uint32x4_t __ret_62; \
-  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
-  __ret_62; \
-})
-#else
-#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint32x4_t __s0_63 = __p0_63; \
-  uint32x4_t __s1_63 = __p1_63; \
-  uint32x2_t __s2_63 = __p2_63; \
-  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
-  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
-  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint32x4_t __ret_63; \
-  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s1_64 = __p1_64; \
-  uint16x4_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
-  __ret_64; \
-})
-#else
-#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s1_65 = __p1_65; \
-  uint16x4_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  float32x4_t __s0_66 = __p0_66; \
-  float32x4_t __s1_66 = __p1_66; \
-  float32x2_t __s2_66 = __p2_66; \
-  float32x4_t __ret_66; \
-  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
-  __ret_66; \
-})
-#else
-#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  float32x4_t __s0_67 = __p0_67; \
-  float32x4_t __s1_67 = __p1_67; \
-  float32x2_t __s2_67 = __p2_67; \
-  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
-  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
-  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
-  float32x4_t __ret_67; \
-  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
-  __ret_67; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  int32x4_t __s0_68 = __p0_68; \
-  int32x4_t __s1_68 = __p1_68; \
-  int32x2_t __s2_68 = __p2_68; \
-  int32x4_t __ret_68; \
-  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
-  __ret_68; \
-})
-#else
-#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  int32x4_t __s0_69 = __p0_69; \
-  int32x4_t __s1_69 = __p1_69; \
-  int32x2_t __s2_69 = __p2_69; \
-  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
-  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
-  int32x4_t __ret_69; \
-  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int16x8_t __s0_70 = __p0_70; \
-  int16x8_t __s1_70 = __p1_70; \
-  int16x4_t __s2_70 = __p2_70; \
-  int16x8_t __ret_70; \
-  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
-  __ret_70; \
-})
-#else
-#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int16x8_t __s0_71 = __p0_71; \
-  int16x8_t __s1_71 = __p1_71; \
-  int16x4_t __s2_71 = __p2_71; \
-  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int16x8_t __ret_71; \
-  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_71; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  uint32x2_t __s0_72 = __p0_72; \
-  uint32x2_t __s1_72 = __p1_72; \
-  uint32x2_t __s2_72 = __p2_72; \
-  uint32x2_t __ret_72; \
-  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
-  __ret_72; \
-})
-#else
-#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  uint32x2_t __s0_73 = __p0_73; \
-  uint32x2_t __s1_73 = __p1_73; \
-  uint32x2_t __s2_73 = __p2_73; \
-  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
-  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  uint32x2_t __ret_73; \
-  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  uint16x4_t __s0_74 = __p0_74; \
-  uint16x4_t __s1_74 = __p1_74; \
-  uint16x4_t __s2_74 = __p2_74; \
-  uint16x4_t __ret_74; \
-  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
-  __ret_74; \
-})
-#else
-#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  uint16x4_t __s0_75 = __p0_75; \
-  uint16x4_t __s1_75 = __p1_75; \
-  uint16x4_t __s2_75 = __p2_75; \
-  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
-  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
-  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
-  uint16x4_t __ret_75; \
-  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
-  __ret_75; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  float32x2_t __s0_76 = __p0_76; \
-  float32x2_t __s1_76 = __p1_76; \
-  float32x2_t __s2_76 = __p2_76; \
-  float32x2_t __ret_76; \
-  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
-  __ret_76; \
-})
-#else
-#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  float32x2_t __s0_77 = __p0_77; \
-  float32x2_t __s1_77 = __p1_77; \
-  float32x2_t __s2_77 = __p2_77; \
-  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
-  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
-  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
-  float32x2_t __ret_77; \
-  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
-  __ret_77; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  int32x2_t __s0_78 = __p0_78; \
-  int32x2_t __s1_78 = __p1_78; \
-  int32x2_t __s2_78 = __p2_78; \
-  int32x2_t __ret_78; \
-  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
-  __ret_78; \
-})
-#else
-#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  int32x2_t __s0_79 = __p0_79; \
-  int32x2_t __s1_79 = __p1_79; \
-  int32x2_t __s2_79 = __p2_79; \
-  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
-  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
-  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
-  int32x2_t __ret_79; \
-  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
-  __ret_79; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  int16x4_t __s0_80 = __p0_80; \
-  int16x4_t __s1_80 = __p1_80; \
-  int16x4_t __s2_80 = __p2_80; \
-  int16x4_t __ret_80; \
-  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
-  __ret_80; \
-})
-#else
-#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  int16x4_t __s0_81 = __p0_81; \
-  int16x4_t __s1_81 = __p1_81; \
-  int16x4_t __s2_81 = __p2_81; \
-  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
-  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
-  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
-  int16x4_t __ret_81; \
-  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
-  __ret_81; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
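Note the float16 splat is defined as a macro rather than an __ai function, consistent with the other f16 entry points in this header. A usage sketch (assuming the target supports __fp16):

  #include <arm_neon.h>

  /* eight copies of v */
  float16x8_t splat_h(float16_t v) {
    return vmovq_n_f16(v);
  }
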
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vmov_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
-  uint32x4_t __s0_82 = __p0_82; \
-  uint32x2_t __s1_82 = __p1_82; \
-  uint32x4_t __ret_82; \
-  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
-  __ret_82; \
-})
-#else
-#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
-  uint32x4_t __s0_83 = __p0_83; \
-  uint32x2_t __s1_83 = __p1_83; \
-  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
-  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
-  uint32x4_t __ret_83; \
-  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
-  uint16x8_t __s0_84 = __p0_84; \
-  uint16x4_t __s1_84 = __p1_84; \
-  uint16x8_t __ret_84; \
-  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
-  __ret_84; \
-})
-#else
-#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
-  uint16x8_t __s0_85 = __p0_85; \
-  uint16x4_t __s1_85 = __p1_85; \
-  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
-  uint16x8_t __ret_85; \
-  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
-  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
-  float32x4_t __s0_86 = __p0_86; \
-  float32x2_t __s1_86 = __p1_86; \
-  float32x4_t __ret_86; \
-  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
-  __ret_86; \
-})
-#else
-#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
-  float32x4_t __s0_87 = __p0_87; \
-  float32x2_t __s1_87 = __p1_87; \
-  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
-  float32x4_t __ret_87; \
-  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
-  int32x4_t __s0_88 = __p0_88; \
-  int32x2_t __s1_88 = __p1_88; \
-  int32x4_t __ret_88; \
-  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
-  __ret_88; \
-})
-#else
-#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
-  int32x4_t __s0_89 = __p0_89; \
-  int32x2_t __s1_89 = __p1_89; \
-  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
-  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
-  int32x4_t __ret_89; \
-  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
-  __ret_89; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
-  int16x8_t __s0_90 = __p0_90; \
-  int16x4_t __s1_90 = __p1_90; \
-  int16x8_t __ret_90; \
-  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
-  __ret_90; \
-})
-#else
-#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
-  int16x8_t __s0_91 = __p0_91; \
-  int16x4_t __s1_91 = __p1_91; \
-  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
-  int16x8_t __ret_91; \
-  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_91; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
-  uint32x2_t __s0_92 = __p0_92; \
-  uint32x2_t __s1_92 = __p1_92; \
-  uint32x2_t __ret_92; \
-  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
-  __ret_92; \
-})
-#else
-#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
-  uint32x2_t __s0_93 = __p0_93; \
-  uint32x2_t __s1_93 = __p1_93; \
-  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
-  uint32x2_t __ret_93; \
-  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
-  uint16x4_t __s0_94 = __p0_94; \
-  uint16x4_t __s1_94 = __p1_94; \
-  uint16x4_t __ret_94; \
-  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
-  __ret_94; \
-})
-#else
-#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
-  uint16x4_t __s0_95 = __p0_95; \
-  uint16x4_t __s1_95 = __p1_95; \
-  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
-  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
-  uint16x4_t __ret_95; \
-  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
-  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
-  __ret_95; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
-  float32x2_t __s0_96 = __p0_96; \
-  float32x2_t __s1_96 = __p1_96; \
-  float32x2_t __ret_96; \
-  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
-  __ret_96; \
-})
-#else
-#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
-  float32x2_t __s0_97 = __p0_97; \
-  float32x2_t __s1_97 = __p1_97; \
-  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
-  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
-  float32x2_t __ret_97; \
-  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
-  __ret_97; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
-  int32x2_t __s0_98 = __p0_98; \
-  int32x2_t __s1_98 = __p1_98; \
-  int32x2_t __ret_98; \
-  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
-  __ret_98; \
-})
-#else
-#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
-  int32x2_t __s0_99 = __p0_99; \
-  int32x2_t __s1_99 = __p1_99; \
-  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
-  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
-  int32x2_t __ret_99; \
-  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
-  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
-  __ret_99; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
-  int16x4_t __s0_100 = __p0_100; \
-  int16x4_t __s1_100 = __p1_100; \
-  int16x4_t __ret_100; \
-  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
-  __ret_100; \
-})
-#else
-#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
-  int16x4_t __s0_101 = __p0_101; \
-  int16x4_t __s1_101 = __p1_101; \
-  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
-  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
-  int16x4_t __ret_101; \
-  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
-  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
-  __ret_101; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * (uint32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * (float32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * (float32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * (int32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * (int32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
-  uint32x2_t __s0_102 = __p0_102; \
-  uint32x2_t __s1_102 = __p1_102; \
-  uint64x2_t __ret_102; \
-  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
-  __ret_102; \
-})
-#else
-#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
-  uint32x2_t __s0_103 = __p0_103; \
-  uint32x2_t __s1_103 = __p1_103; \
-  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
-  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
-  uint64x2_t __ret_103; \
-  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
-  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
-  __ret_103; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
-  uint16x4_t __s0_104 = __p0_104; \
-  uint16x4_t __s1_104 = __p1_104; \
-  uint32x4_t __ret_104; \
-  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
-  __ret_104; \
-})
-#else
-#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
-  uint16x4_t __s0_105 = __p0_105; \
-  uint16x4_t __s1_105 = __p1_105; \
-  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
-  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
-  uint32x4_t __ret_105; \
-  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
-  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
-  __ret_105; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
-  int32x2_t __s0_106 = __p0_106; \
-  int32x2_t __s1_106 = __p1_106; \
-  int64x2_t __ret_106; \
-  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
-  __ret_106; \
-})
-#else
-#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
-  int32x2_t __s0_107 = __p0_107; \
-  int32x2_t __s1_107 = __p1_107; \
-  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  int64x2_t __ret_107; \
-  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
-  int16x4_t __s0_108 = __p0_108; \
-  int16x4_t __s1_108 = __p1_108; \
-  int32x4_t __ret_108; \
-  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
-  __ret_108; \
-})
-#else
-#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
-  int16x4_t __s0_109 = __p0_109; \
-  int16x4_t __s1_109 = __p1_109; \
-  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
-  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
-  int32x4_t __ret_109; \
-  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
-  __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
-  __ret_109; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  int64x2_t __s0_110 = __p0_110; \
-  int32x2_t __s1_110 = __p1_110; \
-  int32x2_t __s2_110 = __p2_110; \
-  int64x2_t __ret_110; \
-  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
-  __ret_110; \
-})
-#else
-#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  int64x2_t __s0_111 = __p0_111; \
-  int32x2_t __s1_111 = __p1_111; \
-  int32x2_t __s2_111 = __p2_111; \
-  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
-  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
-  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
-  int64x2_t __ret_111; \
-  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
-  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
-  __ret_111; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  int32x4_t __s0_112 = __p0_112; \
-  int16x4_t __s1_112 = __p1_112; \
-  int16x4_t __s2_112 = __p2_112; \
-  int32x4_t __ret_112; \
-  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
-  __ret_112; \
-})
-#else
-#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  int32x4_t __s0_113 = __p0_113; \
-  int16x4_t __s1_113 = __p1_113; \
-  int16x4_t __s2_113 = __p2_113; \
-  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
-  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
-  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
-  int32x4_t __ret_113; \
-  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
-  __ret_113; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  int64x2_t __s0_114 = __p0_114; \
-  int32x2_t __s1_114 = __p1_114; \
-  int32x2_t __s2_114 = __p2_114; \
-  int64x2_t __ret_114; \
-  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
-  __ret_114; \
-})
-#else
-#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  int64x2_t __s0_115 = __p0_115; \
-  int32x2_t __s1_115 = __p1_115; \
-  int32x2_t __s2_115 = __p2_115; \
-  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
-  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
-  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
-  int64x2_t __ret_115; \
-  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
-  __ret_115; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  int32x4_t __s0_116 = __p0_116; \
-  int16x4_t __s1_116 = __p1_116; \
-  int16x4_t __s2_116 = __p2_116; \
-  int32x4_t __ret_116; \
-  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
-  __ret_116; \
-})
-#else
-#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  int32x4_t __s0_117 = __p0_117; \
-  int16x4_t __s1_117 = __p1_117; \
-  int16x4_t __s2_117 = __p2_117; \
-  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
-  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
-  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
-  int32x4_t __ret_117; \
-  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
-  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
-  __ret_117; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
-  int32x2_t __s0_118 = __p0_118; \
-  int32x2_t __s1_118 = __p1_118; \
-  int64x2_t __ret_118; \
-  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
-  __ret_118; \
-})
-#else
-#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
-  int32x2_t __s0_119 = __p0_119; \
-  int32x2_t __s1_119 = __p1_119; \
-  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  int64x2_t __ret_119; \
-  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
-  int16x4_t __s0_120 = __p0_120; \
-  int16x4_t __s1_120 = __p1_120; \
-  int32x4_t __ret_120; \
-  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
-  __ret_120; \
-})
-#else
-#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
-  int16x4_t __s0_121 = __p0_121; \
-  int16x4_t __s1_121 = __p1_121; \
-  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
-  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
-  int32x4_t __ret_121; \
-  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
-  __ret_121; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
-})
-#else
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
-})
-#else
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
-})
-#else
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
-})
-#else
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
-})
-#else
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
-})
-#else
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
-})
-#else
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
-})
-#else
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
-})
-#else
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
-})
-#else
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
-})
-#else
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
-})
-#else
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
-})
-#else
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
-})
-#else
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
-})
-#else
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
-})
-#endif
-
-#define vst1_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
-})
-#else
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
-})
-#else
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
-})
-#else
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
-})
-#else
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
-})
-#endif
-
-#define vst1_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
-})
-#else
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-})
-#else
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-})
-#else
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-})
-#else
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-})
-#else
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-})
-#else
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-})
-#else
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-})
-#else
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-})
-#else
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-})
-#else
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-})
-#else
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-})
-#else
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-})
-#else
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-})
-#else
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-})
-#else
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-})
-#else
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-})
-#endif
-
-#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-})
-#else
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-})
-#else
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-})
-#else
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-})
-#else
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-})
-#endif
-
-#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-})
-#else
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
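
(Aside: the vst1_*_x2 family above, and the _x3/_x4 families that follow, store two, three, or four whole vectors to consecutive memory with no interleaving; on big-endian targets each sub-vector is lane-reversed independently before the store. A sketch, assuming <arm_neon.h> and a hypothetical helper:

#include <arm_neon.h>

/* Writes pair.val[0] to dst[0..15] and pair.val[1] to dst[16..31]:
   32 contiguous bytes, in order. */
void store_pair(uint8_t *dst, uint8x16x2_t pair) {
  vst1q_u8_x2(dst, pair);
}
)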
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
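
(Aside: the vst2_* family that follows differs from vst1_*_x2 in memory layout. vst2 interleaves the two sub-vectors element by element (a[0], b[0], a[1], b[1], ...), the store-side counterpart of vld2's deinterleave. A sketch with hypothetical names:

#include <arm_neon.h>

/* Re-pack two separated streams (e.g. left/right audio samples) into
   one interleaved 16-byte block. */
void interleave_store(uint8_t *dst, uint8x8_t a, uint8x8_t b) {
  uint8x8x2_t pair = { { a, b } };
  vst2_u8(dst, pair);
}
)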
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst2_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst2_s64(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-})
-#else
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-})
-#else
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-})
-#else
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-})
-#else
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-})
-#else
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-})
-#else
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-})
-#else
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-})
-#else
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-})
-#else
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-})
-#else
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-})
-#else
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-})
-#else
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-})
-#else
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-})
-#else
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-})
-#else
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst3_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst3_s64(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-})
-#else
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-})
-#else
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-})
-#else
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-})
-#else
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-})
-#else
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-})
-#else
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-})
-#else
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-})
-#else
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-})
-#else
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-})
-#else
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-})
-#else
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-})
-#else
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-})
-#else
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-})
-#else
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-})
-#else
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst4_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst4_s64(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-})
-#else
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-})
-#else
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-})
-#else
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-})
-#else
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-})
-#else
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-})
-#else
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-})
-#else
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-})
-#else
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-})
-#else
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-})
-#else
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-})
-#else
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-})
-#else
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-})
-#else
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-})
-#else
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-})
-#else
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#if !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
-  float16x4_t __s0_122 = __p0_122; \
-  float16x8_t __ret_122; \
-  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
-  __ret_122; \
-})
-#else
-#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
-  float16x4_t __s0_123 = __p0_123; \
-  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
-  float16x8_t __ret_123; \
-  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
-  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_123; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
-  float16x4_t __s0_124 = __p0_124; \
-  float16x4_t __ret_124; \
-  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
-  __ret_124; \
-})
-#else
-#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
-  float16x4_t __s0_125 = __p0_125; \
-  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
-  float16x4_t __ret_125; \
-  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
-  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
-  __ret_125; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
-  int32x4_t __s0_126 = __p0_126; \
-  int32x2_t __s1_126 = __p1_126; \
-  int32x4_t __ret_126; \
-  __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
-  __ret_126; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
-  int32x4_t __s0_127 = __p0_127; \
-  int32x2_t __s1_127 = __p1_127; \
-  int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
-  int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
-  int32x4_t __ret_127; \
-  __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
-  __ret_127; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
-  int16x8_t __s0_128 = __p0_128; \
-  int16x4_t __s1_128 = __p1_128; \
-  int16x8_t __ret_128; \
-  __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
-  __ret_128; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
-  int16x8_t __s0_129 = __p0_129; \
-  int16x4_t __s1_129 = __p1_129; \
-  int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
-  int16x8_t __ret_129; \
-  __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
-  __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_129; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
-  int32x2_t __s0_130 = __p0_130; \
-  int32x2_t __s1_130 = __p1_130; \
-  int32x2_t __ret_130; \
-  __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
-  __ret_130; \
-})
-#else
-#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
-  int32x2_t __s0_131 = __p0_131; \
-  int32x2_t __s1_131 = __p1_131; \
-  int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
-  int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
-  int32x2_t __ret_131; \
-  __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
-  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
-  __ret_131; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
-  int16x4_t __s0_132 = __p0_132; \
-  int16x4_t __s1_132 = __p1_132; \
-  int16x4_t __ret_132; \
-  __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
-  __ret_132; \
-})
-#else
-#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
-  int16x4_t __s0_133 = __p0_133; \
-  int16x4_t __s1_133 = __p1_133; \
-  int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
-  int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
-  int16x4_t __ret_133; \
-  __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
-  __ret_133; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
-  int32x4_t __s0_134 = __p0_134; \
-  int32x2_t __s1_134 = __p1_134; \
-  int32x4_t __ret_134; \
-  __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
-  __ret_134; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
-  int32x4_t __s0_135 = __p0_135; \
-  int32x2_t __s1_135 = __p1_135; \
-  int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
-  int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
-  int32x4_t __ret_135; \
-  __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
-  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
-  __ret_135; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
-  int16x8_t __s0_136 = __p0_136; \
-  int16x4_t __s1_136 = __p1_136; \
-  int16x8_t __ret_136; \
-  __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
-  __ret_136; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
-  int16x8_t __s0_137 = __p0_137; \
-  int16x4_t __s1_137 = __p1_137; \
-  int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
-  int16x8_t __ret_137; \
-  __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
-  __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_137; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  int32x2_t __s0_138 = __p0_138; \
-  int32x2_t __s1_138 = __p1_138; \
-  int32x2_t __ret_138; \
-  __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
-  __ret_138; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  int32x2_t __s0_139 = __p0_139; \
-  int32x2_t __s1_139 = __p1_139; \
-  int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
-  int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
-  int32x2_t __ret_139; \
-  __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
-  __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
-  __ret_139; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  int16x4_t __s0_140 = __p0_140; \
-  int16x4_t __s1_140 = __p1_140; \
-  int16x4_t __ret_140; \
-  __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
-  __ret_140; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  int16x4_t __s0_141 = __p0_141; \
-  int16x4_t __s1_141 = __p1_141; \
-  int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
-  int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
-  int16x4_t __ret_141; \
-  __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
-  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
-  __ret_141; \
-})
-#endif
-
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if (__ARM_FP & 2)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
-})
-#else
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
-})
-#else
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-})
-#else
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-})
-#else
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-})
-#else
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-})
-#else
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-})
-#else
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-})
-#else
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-})
-#else
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-})
-#else
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES)
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint32_t vsha1h_u32(uint32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \
-  __ret; \
-})
-#else
-#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
-  float32x4_t __s0_142 = __p0_142; \
-  bfloat16x8_t __s1_142 = __p1_142; \
-  bfloat16x4_t __s2_142 = __p2_142; \
-  float32x4_t __ret_142; \
-bfloat16x4_t __reint_142 = __s2_142; \
-float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
-  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
-  __ret_142; \
-})
-#else
-#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
-  float32x4_t __s0_143 = __p0_143; \
-  bfloat16x8_t __s1_143 = __p1_143; \
-  bfloat16x4_t __s2_143 = __p2_143; \
-  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
-  float32x4_t __ret_143; \
-bfloat16x4_t __reint_143 = __rev2_143; \
-float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
-  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
-  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
-  __ret_143; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
-  float32x2_t __s0_144 = __p0_144; \
-  bfloat16x4_t __s1_144 = __p1_144; \
-  bfloat16x4_t __s2_144 = __p2_144; \
-  float32x2_t __ret_144; \
-bfloat16x4_t __reint_144 = __s2_144; \
-float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
-  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
-  __ret_144; \
-})
-#else
-#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  float32x2_t __s0_145 = __p0_145; \
-  bfloat16x4_t __s1_145 = __p1_145; \
-  bfloat16x4_t __s2_145 = __p2_145; \
-  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
-  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
-  float32x2_t __ret_145; \
-bfloat16x4_t __reint_145 = __rev2_145; \
-float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
-  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
-  __ret_145; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  float32x4_t __s0_146 = __p0_146; \
-  bfloat16x8_t __s1_146 = __p1_146; \
-  bfloat16x8_t __s2_146 = __p2_146; \
-  float32x4_t __ret_146; \
-bfloat16x8_t __reint_146 = __s2_146; \
-float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
-  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
-  __ret_146; \
-})
-#else
-#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  float32x4_t __s0_147 = __p0_147; \
-  bfloat16x8_t __s1_147 = __p1_147; \
-  bfloat16x8_t __s2_147 = __p2_147; \
-  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_147; \
-bfloat16x8_t __reint_147 = __rev2_147; \
-float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
-  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
-  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  float32x2_t __s0_148 = __p0_148; \
-  bfloat16x4_t __s1_148 = __p1_148; \
-  bfloat16x8_t __s2_148 = __p2_148; \
-  float32x2_t __ret_148; \
-bfloat16x8_t __reint_148 = __s2_148; \
-float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
-  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
-  __ret_148; \
-})
-#else
-#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  float32x2_t __s0_149 = __p0_149; \
-  bfloat16x4_t __s1_149 = __p1_149; \
-  bfloat16x8_t __s2_149 = __p2_149; \
-  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
-  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_149; \
-bfloat16x8_t __reint_149 = __rev2_149; \
-float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
-  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
-  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
-  __ret_149; \
-})
-#endif
-
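-/* Editorial sketch, not part of the generated header: a scalar reference
- * model for what the BFDOT intrinsics above compute, assuming the Arm BF16
- * extension's semantics of a 2-way bf16 dot product accumulated into each
- * f32 lane, i.e. ret[i] = acc[i] + a[2i]*b[2i] + a[2i+1]*b[2i+1], with the
- * _lane/_laneq forms first splatting one selected bf16 pair. bf16_to_f32()
- * widens a bfloat16 bit pattern exactly (assumes 32-bit unsigned int). */
-static inline float bf16_to_f32(unsigned short bits) {
-  union { unsigned int u; float f; } v;
-  v.u = (unsigned int)bits << 16;   /* bf16 is the top 16 bits of an f32 */
-  return v.f;
-}
-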
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
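-/* Editorial note (illustrative only): per the Arm BF16 extension,
- * vbfmlalbq_f32 accumulates products of the even-numbered ("bottom") bf16
- * lanes and vbfmlaltq_f32 the odd-numbered ("top") lanes, i.e.
- *   ret[i] = acc[i] + f32(a[2i + sel]) * f32(b[2i + sel])   // sel = 0 or 1
- * while vbfmmlaq_f32 performs a 2x4 by 4x2 widening matrix
- * multiply-accumulate over the same operand layout. */
-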
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (bfloat16x4_t)(__promote); \
-  __ret; \
-})
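-/* Editorial note: vcreate_bf16 only promotes to uint64_t and reinterprets
- * the 64 bits as four bf16 lanes; hypothetical usage:
- *   bfloat16x4_t v = vcreate_bf16(0x3F803F803F803F80ULL); // four bf16 1.0
- */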
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
-  float32x4_t __ret_150;
-bfloat16x4_t __reint_150 = __p0_150;
-int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
-  __ret_150 = *(float32x4_t *) &__reint1_150;
-  return __ret_150;
-}
-#else
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
-  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
-  float32x4_t __ret_151;
-bfloat16x4_t __reint_151 = __rev0_151;
-int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
-  __ret_151 = *(float32x4_t *) &__reint1_151;
-  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
-  return __ret_151;
-}
-__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
-  float32x4_t __ret_152;
-bfloat16x4_t __reint_152 = __p0_152;
-int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
-  __ret_152 = *(float32x4_t *) &__reint1_152;
-  return __ret_152;
-}
-#endif
-
-__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
-  float32_t __ret;
-bfloat16_t __reint = __p0;
-int32_t __reint1 = *(int32_t *) &__reint << 16;
-  __ret = *(float32_t *) &__reint1;
-  return __ret;
-}
-__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
-  bfloat16_t __ret;
-  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
-  return __ret;
-}
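-/* Editorial note: the scalar conversions above are a shift/reinterpret
- * pair. Widening (vcvtah_f32_bf16) is exact: the bf16 bits become the top
- * half of the f32, as the `<< 16` shows. Narrowing (vcvth_bf16_f32) goes
- * through the builtin, which applies the architecture's rounding
- * (typically round-to-nearest-even) to the discarded low 16 bits. */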
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
-  bfloat16x4_t __s0_153 = __p0_153; \
-  bfloat16x8_t __ret_153; \
-  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
-  __ret_153; \
-})
-#else
-#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
-  bfloat16x4_t __s0_154 = __p0_154; \
-  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_154; \
-  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
-  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_154; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
-  bfloat16x4_t __s0_155 = __p0_155; \
-  bfloat16x4_t __ret_155; \
-  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
-  __ret_155; \
-})
-#else
-#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
-  bfloat16x4_t __s0_156 = __p0_156; \
-  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_156; \
-  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
-  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
-  __ret_156; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
-  bfloat16x8_t __s0_157 = __p0_157; \
-  bfloat16x8_t __ret_157; \
-  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
-  __ret_157; \
-})
-#else
-#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
-  bfloat16x8_t __s0_158 = __p0_158; \
-  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_158; \
-  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
-  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_158; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
-  bfloat16x8_t __s0_159 = __p0_159; \
-  bfloat16x4_t __ret_159; \
-  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
-  __ret_159; \
-})
-#else
-#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
-  bfloat16x8_t __s0_160 = __p0_160; \
-  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_160; \
-  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
-  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
-  __ret_160; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
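-/* Editorial sketch (hypothetical values): the lane index to the get-lane
- * macros above must be a constant expression, e.g.
- *   bfloat16x8_t v = vdupq_n_bf16(vcvth_bf16_f32(1.0f));
- *   bfloat16_t x = vgetq_lane_bf16(v, 3);   // read lane 3
- */
-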
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
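-/* Editorial sketch: vget_low_bf16/vget_high_bf16 above split a 128-bit
- * vector into its lower/upper four lanes, and vcombine_bf16 is their
- * inverse:
- *   bfloat16x8_t w = vcombine_bf16(vget_low_bf16(v), vget_high_bf16(v));
- *   // w == v lane-for-lane
- */
-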
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-  __ret; \
-})
-#else
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-  __ret; \
-})
-#else
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
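-/* Editorial sketch (hypothetical buffer): the vld1 family above performs
- * contiguous loads; the _x2/_x3/_x4 forms read that many consecutive
- * vectors through one pointer, with no de-interleaving:
- *   bfloat16_t buf[16];
- *   bfloat16x8x2_t pair = vld1q_bf16_x2(buf); // val[0]=buf[0..7], val[1]=buf[8..15]
- */
-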
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
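-/* Editorial note: in contrast to vld1*_x2, the vld2/vld3/vld4 macros above
- * de-interleave structure members as they load (for vld2q_bf16:
- * val[0] = {p[0], p[2], p[4], ...}, val[1] = {p[1], p[3], ...}), the _dup
- * forms broadcast one structure to all lanes, and the _lane forms replace
- * a single lane of each sub-vector; the vst2/vst3/vst4 stores further
- * below re-interleave on the way out. */
-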
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
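-/* Editorial sketch: vset*_lane_bf16 above returns a copy of the vector
- * with one lane replaced, again with a constant lane index:
- *   v = vsetq_lane_bf16(x, v, 0);   // write x into lane 0
- */
-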
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
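-/* Editorial sketch (hypothetical buffer): the vst1 family mirrors vld1 as
- * contiguous stores, so a load/store pair round-trips memory:
- *   bfloat16_t buf[8];
- *   vst1q_bf16(buf, vld1q_bf16(buf));   // leaves buf unchanged
- */
-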
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __a32_vcvt_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
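-/* Editorial note: in the A32 block above, the f32->bf16 narrowing is
- * composed from __a32_vcvt_bf16_f32, with vcvtq_low_bf16_f32 built via
- * vcombine_bf16 against a zeroed half and vcvtq_high_bf16_f32 re-using
- * half of the accumulator; the AArch64 block below maps the same API onto
- * the dedicated __builtin_neon___a64_vcvtq_low_bf16_v builtin instead. */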
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
-  bfloat16x8_t __s0_161 = __p0_161; \
-  bfloat16x4_t __s2_161 = __p2_161; \
-  bfloat16x8_t __ret_161; \
-  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
-  __ret_161; \
-})
-#else
-#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
-  bfloat16x8_t __s0_162 = __p0_162; \
-  bfloat16x4_t __s2_162 = __p2_162; \
-  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_162; \
-  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
-  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_162; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
-  bfloat16x4_t __s0_163 = __p0_163; \
-  bfloat16x4_t __s2_163 = __p2_163; \
-  bfloat16x4_t __ret_163; \
-  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
-  __ret_163; \
-})
-#else
-#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
-  bfloat16x4_t __s0_164 = __p0_164; \
-  bfloat16x4_t __s2_164 = __p2_164; \
-  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_164; \
-  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
-  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
-  __ret_164; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
-  bfloat16x8_t __s0_165 = __p0_165; \
-  bfloat16x8_t __s2_165 = __p2_165; \
-  bfloat16x8_t __ret_165; \
-  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
-  __ret_165; \
-})
-#else
-#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
-  bfloat16x8_t __s0_166 = __p0_166; \
-  bfloat16x8_t __s2_166 = __p2_166; \
-  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_166; \
-  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
-  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_166; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
-  bfloat16x4_t __s0_167 = __p0_167; \
-  bfloat16x8_t __s2_167 = __p2_167; \
-  bfloat16x4_t __ret_167; \
-  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
-  __ret_167; \
-})
-#else
-#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
-  bfloat16x4_t __s0_168 = __p0_168; \
-  bfloat16x8_t __s2_168 = __p2_168; \
-  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_168; \
-  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
-  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
-  __ret_168; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = __a64_vcvtq_low_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
-  float32x2_t __s0_169 = __p0_169; \
-  float32x2_t __s1_169 = __p1_169; \
-  float32x2_t __s2_169 = __p2_169; \
-  float32x2_t __ret_169; \
-float32x2_t __reint_169 = __s2_169; \
-uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \
-  __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \
-  __ret_169; \
-})
-#else
-#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
-  float32x2_t __s0_170 = __p0_170; \
-  float32x2_t __s1_170 = __p1_170; \
-  float32x2_t __s2_170 = __p2_170; \
-  float32x2_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \
-  float32x2_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \
-  float32x2_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \
-  float32x2_t __ret_170; \
-float32x2_t __reint_170 = __rev2_170; \
-uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \
-  __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \
-  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \
-  __ret_170; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
-  float32x4_t __s0_171 = __p0_171; \
-  float32x4_t __s1_171 = __p1_171; \
-  float32x2_t __s2_171 = __p2_171; \
-  float32x4_t __ret_171; \
-float32x2_t __reint_171 = __s2_171; \
-uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \
-  __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \
-  __ret_171; \
-})
-#else
-#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
-  float32x4_t __s0_172 = __p0_172; \
-  float32x4_t __s1_172 = __p1_172; \
-  float32x2_t __s2_172 = __p2_172; \
-  float32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
-  float32x4_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \
-  float32x2_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \
-  float32x4_t __ret_172; \
-float32x2_t __reint_172 = __rev2_172; \
-uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \
-  __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \
-  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
-  __ret_172; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
-  float32x2_t __s0_173 = __p0_173; \
-  float32x2_t __s1_173 = __p1_173; \
-  float32x4_t __s2_173 = __p2_173; \
-  float32x2_t __ret_173; \
-float32x4_t __reint_173 = __s2_173; \
-uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \
-  __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \
-  __ret_173; \
-})
-#else
-#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
-  float32x2_t __s0_174 = __p0_174; \
-  float32x2_t __s1_174 = __p1_174; \
-  float32x4_t __s2_174 = __p2_174; \
-  float32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
-  float32x2_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \
-  float32x4_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \
-  float32x2_t __ret_174; \
-float32x4_t __reint_174 = __rev2_174; \
-uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \
-  __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \
-  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
-  __ret_174; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
-  float32x4_t __s0_175 = __p0_175; \
-  float32x4_t __s1_175 = __p1_175; \
-  float32x4_t __s2_175 = __p2_175; \
-  float32x4_t __ret_175; \
-float32x4_t __reint_175 = __s2_175; \
-uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \
-  __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \
-  __ret_175; \
-})
-#else
-#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
-  float32x4_t __s0_176 = __p0_176; \
-  float32x4_t __s1_176 = __p1_176; \
-  float32x4_t __s2_176 = __p2_176; \
-  float32x4_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \
-  float32x4_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \
-  float32x4_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \
-  float32x4_t __ret_176; \
-float32x4_t __reint_176 = __rev2_176; \
-uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \
-  __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \
-  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \
-  __ret_176; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
-  float32x2_t __s0_177 = __p0_177; \
-  float32x2_t __s1_177 = __p1_177; \
-  float32x2_t __s2_177 = __p2_177; \
-  float32x2_t __ret_177; \
-float32x2_t __reint_177 = __s2_177; \
-uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \
-  __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \
-  __ret_177; \
-})
-#else
-#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
-  float32x2_t __s0_178 = __p0_178; \
-  float32x2_t __s1_178 = __p1_178; \
-  float32x2_t __s2_178 = __p2_178; \
-  float32x2_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \
-  float32x2_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \
-  float32x2_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \
-  float32x2_t __ret_178; \
-float32x2_t __reint_178 = __rev2_178; \
-uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \
-  __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \
-  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \
-  __ret_178; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
-  float32x4_t __s0_179 = __p0_179; \
-  float32x4_t __s1_179 = __p1_179; \
-  float32x2_t __s2_179 = __p2_179; \
-  float32x4_t __ret_179; \
-float32x2_t __reint_179 = __s2_179; \
-uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \
-  __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \
-  __ret_179; \
-})
-#else
-#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
-  float32x4_t __s0_180 = __p0_180; \
-  float32x4_t __s1_180 = __p1_180; \
-  float32x2_t __s2_180 = __p2_180; \
-  float32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
-  float32x4_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \
-  float32x2_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \
-  float32x4_t __ret_180; \
-float32x2_t __reint_180 = __rev2_180; \
-uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \
-  __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \
-  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
-  __ret_180; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
-  float32x2_t __s0_181 = __p0_181; \
-  float32x2_t __s1_181 = __p1_181; \
-  float32x4_t __s2_181 = __p2_181; \
-  float32x2_t __ret_181; \
-float32x4_t __reint_181 = __s2_181; \
-uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \
-  __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \
-  __ret_181; \
-})
-#else
-#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
-  float32x2_t __s0_182 = __p0_182; \
-  float32x2_t __s1_182 = __p1_182; \
-  float32x4_t __s2_182 = __p2_182; \
-  float32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
-  float32x2_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \
-  float32x4_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \
-  float32x2_t __ret_182; \
-float32x4_t __reint_182 = __rev2_182; \
-uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \
-  __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \
-  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
-  __ret_182; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
-  float32x4_t __s0_183 = __p0_183; \
-  float32x4_t __s1_183 = __p1_183; \
-  float32x4_t __s2_183 = __p2_183; \
-  float32x4_t __ret_183; \
-float32x4_t __reint_183 = __s2_183; \
-uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \
-  __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \
-  __ret_183; \
-})
-#else
-#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
-  float32x4_t __s0_184 = __p0_184; \
-  float32x4_t __s1_184 = __p1_184; \
-  float32x4_t __s2_184 = __p2_184; \
-  float32x4_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \
-  float32x4_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \
-  float32x4_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \
-  float32x4_t __ret_184; \
-float32x4_t __reint_184 = __rev2_184; \
-uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \
-  __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \
-  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \
-  __ret_184; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
-  float32x2_t __s0_185 = __p0_185; \
-  float32x2_t __s1_185 = __p1_185; \
-  float32x2_t __s2_185 = __p2_185; \
-  float32x2_t __ret_185; \
-float32x2_t __reint_185 = __s2_185; \
-uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \
-  __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \
-  __ret_185; \
-})
-#else
-#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
-  float32x2_t __s0_186 = __p0_186; \
-  float32x2_t __s1_186 = __p1_186; \
-  float32x2_t __s2_186 = __p2_186; \
-  float32x2_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \
-  float32x2_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \
-  float32x2_t __rev2_186;  __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \
-  float32x2_t __ret_186; \
-float32x2_t __reint_186 = __rev2_186; \
-uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \
-  __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \
-  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \
-  __ret_186; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
-  float32x4_t __s0_187 = __p0_187; \
-  float32x4_t __s1_187 = __p1_187; \
-  float32x2_t __s2_187 = __p2_187; \
-  float32x4_t __ret_187; \
-float32x2_t __reint_187 = __s2_187; \
-uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \
-  __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \
-  __ret_187; \
-})
-#else
-#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
-  float32x4_t __s0_188 = __p0_188; \
-  float32x4_t __s1_188 = __p1_188; \
-  float32x2_t __s2_188 = __p2_188; \
-  float32x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
-  float32x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
-  float32x2_t __rev2_188;  __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \
-  float32x4_t __ret_188; \
-float32x2_t __reint_188 = __rev2_188; \
-uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \
-  __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \
-  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
-  __ret_188; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
-  float32x2_t __s0_189 = __p0_189; \
-  float32x2_t __s1_189 = __p1_189; \
-  float32x4_t __s2_189 = __p2_189; \
-  float32x2_t __ret_189; \
-float32x4_t __reint_189 = __s2_189; \
-uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \
-  __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \
-  __ret_189; \
-})
-#else
-#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
-  float32x2_t __s0_190 = __p0_190; \
-  float32x2_t __s1_190 = __p1_190; \
-  float32x4_t __s2_190 = __p2_190; \
-  float32x2_t __rev0_190;  __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \
-  float32x2_t __rev1_190;  __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \
-  float32x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
-  float32x2_t __ret_190; \
-float32x4_t __reint_190 = __rev2_190; \
-uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \
-  __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \
-  __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \
-  __ret_190; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
-  float32x4_t __s0_191 = __p0_191; \
-  float32x4_t __s1_191 = __p1_191; \
-  float32x4_t __s2_191 = __p2_191; \
-  float32x4_t __ret_191; \
-float32x4_t __reint_191 = __s2_191; \
-uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \
-  __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \
-  __ret_191; \
-})
-#else
-#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
-  float32x4_t __s0_192 = __p0_192; \
-  float32x4_t __s1_192 = __p1_192; \
-  float32x4_t __s2_192 = __p2_192; \
-  float32x4_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \
-  float32x4_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \
-  float32x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
-  float32x4_t __ret_192; \
-float32x4_t __reint_192 = __rev2_192; \
-uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \
-  __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \
-  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \
-  __ret_192; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
-  float32x2_t __s0_193 = __p0_193; \
-  float32x2_t __s1_193 = __p1_193; \
-  float32x2_t __s2_193 = __p2_193; \
-  float32x2_t __ret_193; \
-float32x2_t __reint_193 = __s2_193; \
-uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \
-  __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \
-  __ret_193; \
-})
-#else
-#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
-  float32x2_t __s0_194 = __p0_194; \
-  float32x2_t __s1_194 = __p1_194; \
-  float32x2_t __s2_194 = __p2_194; \
-  float32x2_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \
-  float32x2_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \
-  float32x2_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \
-  float32x2_t __ret_194; \
-float32x2_t __reint_194 = __rev2_194; \
-uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \
-  __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \
-  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \
-  __ret_194; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
-  float32x4_t __s0_195 = __p0_195; \
-  float32x4_t __s1_195 = __p1_195; \
-  float32x2_t __s2_195 = __p2_195; \
-  float32x4_t __ret_195; \
-float32x2_t __reint_195 = __s2_195; \
-uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \
-  __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \
-  __ret_195; \
-})
-#else
-#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
-  float32x4_t __s0_196 = __p0_196; \
-  float32x4_t __s1_196 = __p1_196; \
-  float32x2_t __s2_196 = __p2_196; \
-  float32x4_t __rev0_196;  __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \
-  float32x4_t __rev1_196;  __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \
-  float32x2_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \
-  float32x4_t __ret_196; \
-float32x2_t __reint_196 = __rev2_196; \
-uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
-  __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
-  __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
-  __ret_196; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
-  float32x2_t __s0_197 = __p0_197; \
-  float32x2_t __s1_197 = __p1_197; \
-  float32x4_t __s2_197 = __p2_197; \
-  float32x2_t __ret_197; \
-float32x4_t __reint_197 = __s2_197; \
-uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
-  __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
-  __ret_197; \
-})
-#else
-#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
-  float32x2_t __s0_198 = __p0_198; \
-  float32x2_t __s1_198 = __p1_198; \
-  float32x4_t __s2_198 = __p2_198; \
-  float32x2_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
-  float32x2_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
-  float32x4_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
-  float32x2_t __ret_198; \
-float32x4_t __reint_198 = __rev2_198; \
-uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
-  __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
-  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
-  __ret_198; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
-  float32x4_t __s0_199 = __p0_199; \
-  float32x4_t __s1_199 = __p1_199; \
-  float32x4_t __s2_199 = __p2_199; \
-  float32x4_t __ret_199; \
-float32x4_t __reint_199 = __s2_199; \
-uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
-  __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
-  __ret_199; \
-})
-#else
-#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
-  float32x4_t __s0_200 = __p0_200; \
-  float32x4_t __s1_200 = __p1_200; \
-  float32x4_t __s2_200 = __p2_200; \
-  float32x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
-  float32x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
-  float32x4_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
-  float32x4_t __ret_200; \
-float32x4_t __reint_200 = __rev2_200; \
-uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
-  __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
-  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
-  __ret_200; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
-  float16x4_t __s0_201 = __p0_201; \
-  float16x4_t __s1_201 = __p1_201; \
-  float16x4_t __s2_201 = __p2_201; \
-  float16x4_t __ret_201; \
-float16x4_t __reint_201 = __s2_201; \
-uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
-  __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
-  __ret_201; \
-})
-#else
-#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
-  float16x4_t __s0_202 = __p0_202; \
-  float16x4_t __s1_202 = __p1_202; \
-  float16x4_t __s2_202 = __p2_202; \
-  float16x4_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
-  float16x4_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
-  float16x4_t __rev2_202;  __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
-  float16x4_t __ret_202; \
-float16x4_t __reint_202 = __rev2_202; \
-uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
-  __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
-  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
-  __ret_202; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
-  float16x8_t __s0_203 = __p0_203; \
-  float16x8_t __s1_203 = __p1_203; \
-  float16x4_t __s2_203 = __p2_203; \
-  float16x8_t __ret_203; \
-float16x4_t __reint_203 = __s2_203; \
-uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
-  __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
-  __ret_203; \
-})
-#else
-#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
-  float16x8_t __s0_204 = __p0_204; \
-  float16x8_t __s1_204 = __p1_204; \
-  float16x4_t __s2_204 = __p2_204; \
-  float16x8_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_204;  __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \
-  float16x8_t __ret_204; \
-float16x4_t __reint_204 = __rev2_204; \
-uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \
-  __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \
-  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_204; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
-  float16x4_t __s0_205 = __p0_205; \
-  float16x4_t __s1_205 = __p1_205; \
-  float16x8_t __s2_205 = __p2_205; \
-  float16x4_t __ret_205; \
-float16x8_t __reint_205 = __s2_205; \
-uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \
-  __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \
-  __ret_205; \
-})
-#else
-#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
-  float16x4_t __s0_206 = __p0_206; \
-  float16x4_t __s1_206 = __p1_206; \
-  float16x8_t __s2_206 = __p2_206; \
-  float16x4_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \
-  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
-  float16x8_t __rev2_206;  __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_206; \
-float16x8_t __reint_206 = __rev2_206; \
-uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \
-  __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \
-  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \
-  __ret_206; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
-  float16x8_t __s0_207 = __p0_207; \
-  float16x8_t __s1_207 = __p1_207; \
-  float16x8_t __s2_207 = __p2_207; \
-  float16x8_t __ret_207; \
-float16x8_t __reint_207 = __s2_207; \
-uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \
-  __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \
-  __ret_207; \
-})
-#else
-#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
-  float16x8_t __s0_208 = __p0_208; \
-  float16x8_t __s1_208 = __p1_208; \
-  float16x8_t __s2_208 = __p2_208; \
-  float16x8_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_208;  __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_208; \
-float16x8_t __reint_208 = __rev2_208; \
-uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \
-  __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \
-  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_208; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
-  float16x4_t __s0_209 = __p0_209; \
-  float16x4_t __s1_209 = __p1_209; \
-  float16x4_t __s2_209 = __p2_209; \
-  float16x4_t __ret_209; \
-float16x4_t __reint_209 = __s2_209; \
-uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \
-  __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \
-  __ret_209; \
-})
-#else
-#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
-  float16x4_t __s0_210 = __p0_210; \
-  float16x4_t __s1_210 = __p1_210; \
-  float16x4_t __s2_210 = __p2_210; \
-  float16x4_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \
-  float16x4_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \
-  float16x4_t __rev2_210;  __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \
-  float16x4_t __ret_210; \
-float16x4_t __reint_210 = __rev2_210; \
-uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \
-  __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \
-  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \
-  __ret_210; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
-  float16x8_t __s0_211 = __p0_211; \
-  float16x8_t __s1_211 = __p1_211; \
-  float16x4_t __s2_211 = __p2_211; \
-  float16x8_t __ret_211; \
-float16x4_t __reint_211 = __s2_211; \
-uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \
-  __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \
-  __ret_211; \
-})
-#else
-#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
-  float16x8_t __s0_212 = __p0_212; \
-  float16x8_t __s1_212 = __p1_212; \
-  float16x4_t __s2_212 = __p2_212; \
-  float16x8_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_212;  __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \
-  float16x8_t __ret_212; \
-float16x4_t __reint_212 = __rev2_212; \
-uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \
-  __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \
-  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_212; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
-  float16x4_t __s0_213 = __p0_213; \
-  float16x4_t __s1_213 = __p1_213; \
-  float16x8_t __s2_213 = __p2_213; \
-  float16x4_t __ret_213; \
-float16x8_t __reint_213 = __s2_213; \
-uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \
-  __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \
-  __ret_213; \
-})
-#else
-#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
-  float16x4_t __s0_214 = __p0_214; \
-  float16x4_t __s1_214 = __p1_214; \
-  float16x8_t __s2_214 = __p2_214; \
-  float16x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
-  float16x4_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \
-  float16x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_214; \
-float16x8_t __reint_214 = __rev2_214; \
-uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \
-  __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \
-  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
-  __ret_214; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
-  float16x8_t __s0_215 = __p0_215; \
-  float16x8_t __s1_215 = __p1_215; \
-  float16x8_t __s2_215 = __p2_215; \
-  float16x8_t __ret_215; \
-float16x8_t __reint_215 = __s2_215; \
-uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \
-  __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \
-  __ret_215; \
-})
-#else
-#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
-  float16x8_t __s0_216 = __p0_216; \
-  float16x8_t __s1_216 = __p1_216; \
-  float16x8_t __s2_216 = __p2_216; \
-  float16x8_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_216; \
-float16x8_t __reint_216 = __rev2_216; \
-uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \
-  __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \
-  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_216; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
-  float16x4_t __s0_217 = __p0_217; \
-  float16x4_t __s1_217 = __p1_217; \
-  float16x4_t __s2_217 = __p2_217; \
-  float16x4_t __ret_217; \
-float16x4_t __reint_217 = __s2_217; \
-uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \
-  __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \
-  __ret_217; \
-})
-#else
-#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
-  float16x4_t __s0_218 = __p0_218; \
-  float16x4_t __s1_218 = __p1_218; \
-  float16x4_t __s2_218 = __p2_218; \
-  float16x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
-  float16x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
-  float16x4_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \
-  float16x4_t __ret_218; \
-float16x4_t __reint_218 = __rev2_218; \
-uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \
-  __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \
-  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
-  __ret_218; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
-  float16x8_t __s0_219 = __p0_219; \
-  float16x8_t __s1_219 = __p1_219; \
-  float16x4_t __s2_219 = __p2_219; \
-  float16x8_t __ret_219; \
-float16x4_t __reint_219 = __s2_219; \
-uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \
-  __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \
-  __ret_219; \
-})
-#else
-#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
-  float16x8_t __s0_220 = __p0_220; \
-  float16x8_t __s1_220 = __p1_220; \
-  float16x4_t __s2_220 = __p2_220; \
-  float16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
-  float16x8_t __ret_220; \
-float16x4_t __reint_220 = __rev2_220; \
-uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \
-  __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \
-  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_220; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
-  float16x4_t __s0_221 = __p0_221; \
-  float16x4_t __s1_221 = __p1_221; \
-  float16x8_t __s2_221 = __p2_221; \
-  float16x4_t __ret_221; \
-float16x8_t __reint_221 = __s2_221; \
-uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \
-  __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \
-  __ret_221; \
-})
-#else
-#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
-  float16x4_t __s0_222 = __p0_222; \
-  float16x4_t __s1_222 = __p1_222; \
-  float16x8_t __s2_222 = __p2_222; \
-  float16x4_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \
-  float16x4_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \
-  float16x8_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_222; \
-float16x8_t __reint_222 = __rev2_222; \
-uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \
-  __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \
-  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \
-  __ret_222; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
-  float16x8_t __s0_223 = __p0_223; \
-  float16x8_t __s1_223 = __p1_223; \
-  float16x8_t __s2_223 = __p2_223; \
-  float16x8_t __ret_223; \
-float16x8_t __reint_223 = __s2_223; \
-uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \
-  __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \
-  __ret_223; \
-})
-#else
-#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
-  float16x8_t __s0_224 = __p0_224; \
-  float16x8_t __s1_224 = __p1_224; \
-  float16x8_t __s2_224 = __p2_224; \
-  float16x8_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_224; \
-float16x8_t __reint_224 = __rev2_224; \
-uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \
-  __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \
-  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_224; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
-  float16x4_t __s0_225 = __p0_225; \
-  float16x4_t __s1_225 = __p1_225; \
-  float16x4_t __s2_225 = __p2_225; \
-  float16x4_t __ret_225; \
-float16x4_t __reint_225 = __s2_225; \
-uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \
-  __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \
-  __ret_225; \
-})
-#else
-#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
-  float16x4_t __s0_226 = __p0_226; \
-  float16x4_t __s1_226 = __p1_226; \
-  float16x4_t __s2_226 = __p2_226; \
-  float16x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
-  float16x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
-  float16x4_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \
-  float16x4_t __ret_226; \
-float16x4_t __reint_226 = __rev2_226; \
-uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \
-  __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \
-  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
-  __ret_226; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
-  float16x8_t __s0_227 = __p0_227; \
-  float16x8_t __s1_227 = __p1_227; \
-  float16x4_t __s2_227 = __p2_227; \
-  float16x8_t __ret_227; \
-float16x4_t __reint_227 = __s2_227; \
-uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \
-  __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \
-  __ret_227; \
-})
-#else
-#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
-  float16x8_t __s0_228 = __p0_228; \
-  float16x8_t __s1_228 = __p1_228; \
-  float16x4_t __s2_228 = __p2_228; \
-  float16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
-  float16x8_t __ret_228; \
-float16x4_t __reint_228 = __rev2_228; \
-uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \
-  __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \
-  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_228; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
-  float16x4_t __s0_229 = __p0_229; \
-  float16x4_t __s1_229 = __p1_229; \
-  float16x8_t __s2_229 = __p2_229; \
-  float16x4_t __ret_229; \
-float16x8_t __reint_229 = __s2_229; \
-uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \
-  __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \
-  __ret_229; \
-})
-#else
-#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
-  float16x4_t __s0_230 = __p0_230; \
-  float16x4_t __s1_230 = __p1_230; \
-  float16x8_t __s2_230 = __p2_230; \
-  float16x4_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \
-  float16x4_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \
-  float16x8_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_230; \
-float16x8_t __reint_230 = __rev2_230; \
-uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \
-  __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \
-  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
-  __ret_230; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
-  float16x8_t __s0_231 = __p0_231; \
-  float16x8_t __s1_231 = __p1_231; \
-  float16x8_t __s2_231 = __p2_231; \
-  float16x8_t __ret_231; \
-float16x8_t __reint_231 = __s2_231; \
-uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
-  __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
-  __ret_231; \
-})
-#else
-#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
-  float16x8_t __s0_232 = __p0_232; \
-  float16x8_t __s1_232 = __p1_232; \
-  float16x8_t __s2_232 = __p2_232; \
-  float16x8_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_232; \
-float16x8_t __reint_232 = __rev2_232; \
-uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
-  __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
-  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_232; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
-  float64x1_t __s0_233 = __p0_233; \
-  float64x1_t __s1_233 = __p1_233; \
-  float64x1_t __s2_233 = __p2_233; \
-  float64x1_t __ret_233; \
-float64x1_t __reint_233 = __s2_233; \
-uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
-  __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
-  __ret_233; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
-  float64x2_t __s0_234 = __p0_234; \
-  float64x2_t __s1_234 = __p1_234; \
-  float64x1_t __s2_234 = __p2_234; \
-  float64x2_t __ret_234; \
-float64x1_t __reint_234 = __s2_234; \
-uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
-  __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
-  __ret_234; \
-})
-#else
-#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
-  float64x2_t __s0_235 = __p0_235; \
-  float64x2_t __s1_235 = __p1_235; \
-  float64x1_t __s2_235 = __p2_235; \
-  float64x2_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
-  float64x2_t __rev1_235;  __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
-  float64x2_t __ret_235; \
-float64x1_t __reint_235 = __s2_235; \
-uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
-  __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
-  __ret_235; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
-  float64x1_t __s0_236 = __p0_236; \
-  float64x1_t __s1_236 = __p1_236; \
-  float64x2_t __s2_236 = __p2_236; \
-  float64x1_t __ret_236; \
-float64x2_t __reint_236 = __s2_236; \
-uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
-  __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
-  __ret_236; \
-})
-#else
-#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
-  float64x1_t __s0_237 = __p0_237; \
-  float64x1_t __s1_237 = __p1_237; \
-  float64x2_t __s2_237 = __p2_237; \
-  float64x2_t __rev2_237;  __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
-  float64x1_t __ret_237; \
-float64x2_t __reint_237 = __rev2_237; \
-uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
-  __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
-  float64x2_t __s0_238 = __p0_238; \
-  float64x2_t __s1_238 = __p1_238; \
-  float64x2_t __s2_238 = __p2_238; \
-  float64x2_t __ret_238; \
-float64x2_t __reint_238 = __s2_238; \
-uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
-  __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
-  __ret_238; \
-})
-#else
-#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
-  float64x2_t __s0_239 = __p0_239; \
-  float64x2_t __s1_239 = __p1_239; \
-  float64x2_t __s2_239 = __p2_239; \
-  float64x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  float64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  float64x2_t __rev2_239;  __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
-  float64x2_t __ret_239; \
-float64x2_t __reint_239 = __rev2_239; \
-uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
-  __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
-  float64x1_t __s0_240 = __p0_240; \
-  float64x1_t __s1_240 = __p1_240; \
-  float64x1_t __s2_240 = __p2_240; \
-  float64x1_t __ret_240; \
-float64x1_t __reint_240 = __s2_240; \
-uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \
-  __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \
-  __ret_240; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
-  float64x2_t __s0_241 = __p0_241; \
-  float64x2_t __s1_241 = __p1_241; \
-  float64x1_t __s2_241 = __p2_241; \
-  float64x2_t __ret_241; \
-float64x1_t __reint_241 = __s2_241; \
-uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \
-  __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \
-  __ret_241; \
-})
-#else
-#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
-  float64x2_t __s0_242 = __p0_242; \
-  float64x2_t __s1_242 = __p1_242; \
-  float64x1_t __s2_242 = __p2_242; \
-  float64x2_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \
-  float64x2_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \
-  float64x2_t __ret_242; \
-float64x1_t __reint_242 = __s2_242; \
-uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \
-  __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \
-  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \
-  __ret_242; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
-  float64x1_t __s0_243 = __p0_243; \
-  float64x1_t __s1_243 = __p1_243; \
-  float64x2_t __s2_243 = __p2_243; \
-  float64x1_t __ret_243; \
-float64x2_t __reint_243 = __s2_243; \
-uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \
-  __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \
-  __ret_243; \
-})
-#else
-#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
-  float64x1_t __s0_244 = __p0_244; \
-  float64x1_t __s1_244 = __p1_244; \
-  float64x2_t __s2_244 = __p2_244; \
-  float64x2_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \
-  float64x1_t __ret_244; \
-float64x2_t __reint_244 = __rev2_244; \
-uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \
-  __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \
-  __ret_244; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
-  float64x2_t __s0_245 = __p0_245; \
-  float64x2_t __s1_245 = __p1_245; \
-  float64x2_t __s2_245 = __p2_245; \
-  float64x2_t __ret_245; \
-float64x2_t __reint_245 = __s2_245; \
-uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \
-  __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \
-  __ret_245; \
-})
-#else
-#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
-  float64x2_t __s0_246 = __p0_246; \
-  float64x2_t __s1_246 = __p1_246; \
-  float64x2_t __s2_246 = __p2_246; \
-  float64x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
-  float64x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
-  float64x2_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \
-  float64x2_t __ret_246; \
-float64x2_t __reint_246 = __rev2_246; \
-uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \
-  __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \
-  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
-  __ret_246; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
-  float64x1_t __s0_247 = __p0_247; \
-  float64x1_t __s1_247 = __p1_247; \
-  float64x1_t __s2_247 = __p2_247; \
-  float64x1_t __ret_247; \
-float64x1_t __reint_247 = __s2_247; \
-uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \
-  __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \
-  __ret_247; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
-  float64x2_t __s0_248 = __p0_248; \
-  float64x2_t __s1_248 = __p1_248; \
-  float64x1_t __s2_248 = __p2_248; \
-  float64x2_t __ret_248; \
-float64x1_t __reint_248 = __s2_248; \
-uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \
-  __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \
-  __ret_248; \
-})
-#else
-#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
-  float64x2_t __s0_249 = __p0_249; \
-  float64x2_t __s1_249 = __p1_249; \
-  float64x1_t __s2_249 = __p2_249; \
-  float64x2_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
-  float64x2_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
-  float64x2_t __ret_249; \
-float64x1_t __reint_249 = __s2_249; \
-uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \
-  __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \
-  __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \
-  __ret_249; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
-  float64x1_t __s0_250 = __p0_250; \
-  float64x1_t __s1_250 = __p1_250; \
-  float64x2_t __s2_250 = __p2_250; \
-  float64x1_t __ret_250; \
-float64x2_t __reint_250 = __s2_250; \
-uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \
-  __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \
-  __ret_250; \
-})
-#else
-#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
-  float64x1_t __s0_251 = __p0_251; \
-  float64x1_t __s1_251 = __p1_251; \
-  float64x2_t __s2_251 = __p2_251; \
-  float64x2_t __rev2_251;  __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \
-  float64x1_t __ret_251; \
-float64x2_t __reint_251 = __rev2_251; \
-uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \
-  __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \
-  __ret_251; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
-  float64x2_t __s0_252 = __p0_252; \
-  float64x2_t __s1_252 = __p1_252; \
-  float64x2_t __s2_252 = __p2_252; \
-  float64x2_t __ret_252; \
-float64x2_t __reint_252 = __s2_252; \
-uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \
-  __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \
-  __ret_252; \
-})
-#else
-#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
-  float64x2_t __s0_253 = __p0_253; \
-  float64x2_t __s1_253 = __p1_253; \
-  float64x2_t __s2_253 = __p2_253; \
-  float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
-  float64x2_t __rev1_253;  __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \
-  float64x2_t __rev2_253;  __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \
-  float64x2_t __ret_253; \
-float64x2_t __reint_253 = __rev2_253; \
-uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \
-  __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \
-  __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
-  __ret_253; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
-  float64x1_t __s0_254 = __p0_254; \
-  float64x1_t __s1_254 = __p1_254; \
-  float64x1_t __s2_254 = __p2_254; \
-  float64x1_t __ret_254; \
-float64x1_t __reint_254 = __s2_254; \
-uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
-  __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
-  __ret_254; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
-  float64x2_t __s0_255 = __p0_255; \
-  float64x2_t __s1_255 = __p1_255; \
-  float64x1_t __s2_255 = __p2_255; \
-  float64x2_t __ret_255; \
-float64x1_t __reint_255 = __s2_255; \
-uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
-  __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
-  __ret_255; \
-})
-#else
-#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
-  float64x2_t __s0_256 = __p0_256; \
-  float64x2_t __s1_256 = __p1_256; \
-  float64x1_t __s2_256 = __p2_256; \
-  float64x2_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
-  float64x2_t __rev1_256;  __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
-  float64x2_t __ret_256; \
-float64x1_t __reint_256 = __s2_256; \
-uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
-  __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
-  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
-  __ret_256; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
-  float64x1_t __s0_257 = __p0_257; \
-  float64x1_t __s1_257 = __p1_257; \
-  float64x2_t __s2_257 = __p2_257; \
-  float64x1_t __ret_257; \
-float64x2_t __reint_257 = __s2_257; \
-uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
-  __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
-  __ret_257; \
-})
-#else
-#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float64x1_t __s0_258 = __p0_258; \
-  float64x1_t __s1_258 = __p1_258; \
-  float64x2_t __s2_258 = __p2_258; \
-  float64x2_t __rev2_258;  __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
-  float64x1_t __ret_258; \
-float64x2_t __reint_258 = __rev2_258; \
-uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
-  __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
-  __ret_258; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float64x2_t __s0_259 = __p0_259; \
-  float64x2_t __s1_259 = __p1_259; \
-  float64x2_t __s2_259 = __p2_259; \
-  float64x2_t __ret_259; \
-float64x2_t __reint_259 = __s2_259; \
-uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
-  __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
-  __ret_259; \
-})
-#else
-#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float64x2_t __s0_260 = __p0_260; \
-  float64x2_t __s1_260 = __p1_260; \
-  float64x2_t __s2_260 = __p2_260; \
-  float64x2_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
-  float64x2_t __rev1_260;  __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
-  float64x2_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
-  float64x2_t __ret_260; \
-float64x2_t __reint_260 = __rev2_260; \
-uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
-  __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
-  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
-  __ret_260; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  uint32x4_t __s0_261 = __p0_261; \
-  uint8x16_t __s1_261 = __p1_261; \
-  uint8x8_t __s2_261 = __p2_261; \
-  uint32x4_t __ret_261; \
-uint8x8_t __reint_261 = __s2_261; \
-uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
-  __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
-  __ret_261; \
-})
-#else
-#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  uint32x4_t __s0_262 = __p0_262; \
-  uint8x16_t __s1_262 = __p1_262; \
-  uint8x8_t __s2_262 = __p2_262; \
-  uint32x4_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
-  uint8x16_t __rev1_262;  __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_262; \
-uint8x8_t __reint_262 = __rev2_262; \
-uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
-  __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
-  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
-  __ret_262; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  int32x4_t __s0_263 = __p0_263; \
-  int8x16_t __s1_263 = __p1_263; \
-  int8x8_t __s2_263 = __p2_263; \
-  int32x4_t __ret_263; \
-int8x8_t __reint_263 = __s2_263; \
-int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
-  __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
-  __ret_263; \
-})
-#else
-#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  int32x4_t __s0_264 = __p0_264; \
-  int8x16_t __s1_264 = __p1_264; \
-  int8x8_t __s2_264 = __p2_264; \
-  int32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
-  int8x16_t __rev1_264;  __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_264; \
-int8x8_t __reint_264 = __rev2_264; \
-int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
-  __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
-  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
-  __ret_264; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  uint32x2_t __s0_265 = __p0_265; \
-  uint8x8_t __s1_265 = __p1_265; \
-  uint8x8_t __s2_265 = __p2_265; \
-  uint32x2_t __ret_265; \
-uint8x8_t __reint_265 = __s2_265; \
-uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
-  __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
-  __ret_265; \
-})
-#else
-#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  uint32x2_t __s0_266 = __p0_266; \
-  uint8x8_t __s1_266 = __p1_266; \
-  uint8x8_t __s2_266 = __p2_266; \
-  uint32x2_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
-  uint8x8_t __rev1_266;  __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_266; \
-uint8x8_t __reint_266 = __rev2_266; \
-uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
-  __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
-  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
-  __ret_266; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  int32x2_t __s0_267 = __p0_267; \
-  int8x8_t __s1_267 = __p1_267; \
-  int8x8_t __s2_267 = __p2_267; \
-  int32x2_t __ret_267; \
-int8x8_t __reint_267 = __s2_267; \
-int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
-  __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
-  __ret_267; \
-})
-#else
-#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  int32x2_t __s0_268 = __p0_268; \
-  int8x8_t __s1_268 = __p1_268; \
-  int8x8_t __s2_268 = __p2_268; \
-  int32x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
-  int8x8_t __rev1_268;  __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_268;  __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_268; \
-int8x8_t __reint_268 = __rev2_268; \
-int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
-  __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
-  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
-  __ret_268; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  uint32x4_t __s0_269 = __p0_269; \
-  uint8x16_t __s1_269 = __p1_269; \
-  uint8x16_t __s2_269 = __p2_269; \
-  uint32x4_t __ret_269; \
-uint8x16_t __reint_269 = __s2_269; \
-uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
-  __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
-  __ret_269; \
-})
-#else
-#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  uint32x4_t __s0_270 = __p0_270; \
-  uint8x16_t __s1_270 = __p1_270; \
-  uint8x16_t __s2_270 = __p2_270; \
-  uint32x4_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
-  uint8x16_t __rev1_270;  __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_270; \
-uint8x16_t __reint_270 = __rev2_270; \
-uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
-  __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
-  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
-  __ret_270; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  int32x4_t __s0_271 = __p0_271; \
-  int8x16_t __s1_271 = __p1_271; \
-  int8x16_t __s2_271 = __p2_271; \
-  int32x4_t __ret_271; \
-  int8x16_t __reint_271 = __s2_271; \
-  int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
-  __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
-  __ret_271; \
-})
-#else
-#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  int32x4_t __s0_272 = __p0_272; \
-  int8x16_t __s1_272 = __p1_272; \
-  int8x16_t __s2_272 = __p2_272; \
-  int32x4_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
-  int8x16_t __rev1_272;  __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_272; \
-  int8x16_t __reint_272 = __rev2_272; \
-  int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
-  __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
-  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
-  __ret_272; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  uint32x2_t __s0_273 = __p0_273; \
-  uint8x8_t __s1_273 = __p1_273; \
-  uint8x16_t __s2_273 = __p2_273; \
-  uint32x2_t __ret_273; \
-  uint8x16_t __reint_273 = __s2_273; \
-  uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
-  __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
-  __ret_273; \
-})
-#else
-#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  uint32x2_t __s0_274 = __p0_274; \
-  uint8x8_t __s1_274 = __p1_274; \
-  uint8x16_t __s2_274 = __p2_274; \
-  uint32x2_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
-  uint8x8_t __rev1_274;  __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_274; \
-  uint8x16_t __reint_274 = __rev2_274; \
-  uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
-  __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
-  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
-  __ret_274; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  int32x2_t __s0_275 = __p0_275; \
-  int8x8_t __s1_275 = __p1_275; \
-  int8x16_t __s2_275 = __p2_275; \
-  int32x2_t __ret_275; \
-  int8x16_t __reint_275 = __s2_275; \
-  int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
-  __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
-  __ret_275; \
-})
-#else
-#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  int32x2_t __s0_276 = __p0_276; \
-  int8x8_t __s1_276 = __p1_276; \
-  int8x16_t __s2_276 = __p2_276; \
-  int32x2_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
-  int8x8_t __rev1_276;  __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_276; \
-  int8x16_t __reint_276 = __rev2_276; \
-  int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
-  __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
-  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
-  __ret_276; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \
-  float16x8_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x8_t __ret_277; \
-  __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \
-  __ret_277; \
-})
-#else
-#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \
-  float16x8_t __s0_278 = __p0_278; \
-  float16x4_t __s1_278 = __p1_278; \
-  float16x8_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_278;  __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \
-  float16x8_t __ret_278; \
-  __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \
-  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_278; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \
-  float16x4_t __s0_279 = __p0_279; \
-  float16x4_t __s1_279 = __p1_279; \
-  float16x4_t __ret_279; \
-  __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \
-  __ret_279; \
-})
-#else
-#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \
-  float16x4_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __rev0_280;  __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \
-  float16x4_t __rev1_280;  __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \
-  float16x4_t __ret_280; \
-  __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \
-  __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \
-  __ret_280; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float16_t __s0_281 = __p0_281; \
-  float16_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float16_t __ret_281; \
-  __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \
-  __ret_281; \
-})
-#else
-#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float16_t __s0_282 = __p0_282; \
-  float16_t __s1_282 = __p1_282; \
-  float16x4_t __s2_282 = __p2_282; \
-  float16x4_t __rev2_282;  __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \
-  float16_t __ret_282; \
-  __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \
-  __ret_282; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float16x8_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x4_t __s2_283 = __p2_283; \
-  float16x8_t __ret_283; \
-  __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \
-  __ret_283; \
-})
-#else
-#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float16x8_t __s0_284 = __p0_284; \
-  float16x8_t __s1_284 = __p1_284; \
-  float16x4_t __s2_284 = __p2_284; \
-  float16x8_t __rev0_284;  __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_284;  __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_284;  __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \
-  float16x8_t __ret_284; \
-  __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \
-  __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_284; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float16x4_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x4_t __s2_285 = __p2_285; \
-  float16x4_t __ret_285; \
-  __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \
-  __ret_285; \
-})
-#else
-#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float16x4_t __s0_286 = __p0_286; \
-  float16x4_t __s1_286 = __p1_286; \
-  float16x4_t __s2_286 = __p2_286; \
-  float16x4_t __rev0_286;  __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \
-  float16x4_t __rev1_286;  __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \
-  float16x4_t __rev2_286;  __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \
-  float16x4_t __ret_286; \
-  __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \
-  __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \
-  __ret_286; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float16_t __s0_287 = __p0_287; \
-  float16_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float16_t __ret_287; \
-  __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \
-  __ret_287; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float16_t __s0_288 = __p0_288; \
-  float16_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float16x8_t __rev2_288;  __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_288; \
-  __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \
-  __ret_288; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float16x8_t __s0_289 = __p0_289; \
-  float16x8_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float16x8_t __ret_289; \
-  __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \
-  __ret_289; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
-  float16x8_t __s0_290 = __p0_290; \
-  float16x8_t __s1_290 = __p1_290; \
-  float16x8_t __s2_290 = __p2_290; \
-  float16x8_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_290;  __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_290; \
-  __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \
-  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_290; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
-  float16x4_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x8_t __s2_291 = __p2_291; \
-  float16x4_t __ret_291; \
-  __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \
-  __ret_291; \
-})
-#else
-#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
-  float16x4_t __s0_292 = __p0_292; \
-  float16x4_t __s1_292 = __p1_292; \
-  float16x8_t __s2_292 = __p2_292; \
-  float16x4_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \
-  float16x4_t __rev1_292;  __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \
-  float16x8_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_292; \
-  __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \
-  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \
-  __ret_292; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16x8_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __ret_293; \
-  __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \
-  __ret_293; \
-})
-#else
-#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \
-  float16x8_t __s0_294 = __p0_294; \
-  float16x8_t __s1_294 = __p1_294; \
-  float16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_294;  __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_294; \
-  __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \
-  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_294; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \
-  float16x4_t __s0_295 = __p0_295; \
-  float16x8_t __s1_295 = __p1_295; \
-  float16x4_t __ret_295; \
-  __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \
-  __ret_295; \
-})
-#else
-#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \
-  float16x4_t __s0_296 = __p0_296; \
-  float16x8_t __s1_296 = __p1_296; \
-  float16x4_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \
-  float16x8_t __rev1_296;  __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_296; \
-  __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \
-  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \
-  __ret_296; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \
-  float16x8_t __s0_297 = __p0_297; \
-  float16x4_t __s1_297 = __p1_297; \
-  float16x8_t __ret_297; \
-  __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \
-  __ret_297; \
-})
-#else
-#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \
-  float16x8_t __s0_298 = __p0_298; \
-  float16x4_t __s1_298 = __p1_298; \
-  float16x8_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_298;  __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \
-  float16x8_t __ret_298; \
-  __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \
-  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_298; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \
-  float16x4_t __s0_299 = __p0_299; \
-  float16x4_t __s1_299 = __p1_299; \
-  float16x4_t __ret_299; \
-  __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \
-  __ret_299; \
-})
-#else
-#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \
-  float16x4_t __s0_300 = __p0_300; \
-  float16x4_t __s1_300 = __p1_300; \
-  float16x4_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \
-  float16x4_t __rev1_300;  __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \
-  float16x4_t __ret_300; \
-  __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \
-  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \
-  __ret_300; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \
-  float16x8_t __s0_301 = __p0_301; \
-  float16x8_t __s1_301 = __p1_301; \
-  float16x8_t __ret_301; \
-  __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \
-  __ret_301; \
-})
-#else
-#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \
-  float16x8_t __s0_302 = __p0_302; \
-  float16x8_t __s1_302 = __p1_302; \
-  float16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_302;  __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_302; \
-  __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \
-  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_302; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \
-  float16x4_t __s0_303 = __p0_303; \
-  float16x8_t __s1_303 = __p1_303; \
-  float16x4_t __ret_303; \
-  __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \
-  __ret_303; \
-})
-#else
-#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \
-  float16x4_t __s0_304 = __p0_304; \
-  float16x8_t __s1_304 = __p1_304; \
-  float16x4_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \
-  float16x8_t __rev1_304;  __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_304; \
-  __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \
-  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \
-  __ret_304; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
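The vmulx block above implements FMULX semantics: an ordinary multiply except that 0 x infinity yields 2.0 (with the appropriate sign) instead of NaN, which keeps Newton-Raphson reciprocal refinement well-defined. A minimal usage sketch, not part of the header, assuming an AArch64 target with FP16 vector arithmetic (e.g. -march=armv8.2-a+fp16):

#include <arm_neon.h>

/* Multiply every element of v by element 1 of s, with FMULX semantics. */
float16x4_t scale_by_lane(float16x4_t v, float16x4_t s) {
  return vmulx_lane_f16(v, s, 1);
}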
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
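The vpadd/vpmax/vpmin block provides pairwise (adjacent-lane) reductions; the *nm variants follow the IEEE minNum/maxNum rules, so a quiet NaN loses to a number. A sketch of a full horizontal sum built from vpaddq_f16 (illustrative only, same FP16 target assumption as above):

#include <arm_neon.h>

/* Sum all eight half-float lanes by repeated pairwise addition. */
float16_t sum8_f16(float16x8_t v) {
  float16x8_t t = vpaddq_f16(v, v);  /* 8 lanes -> 4 partial sums */
  t = vpaddq_f16(t, t);              /* 4 -> 2 */
  t = vpaddq_f16(t, t);              /* 2 -> 1, replicated across lanes */
  return vgetq_lane_f16(t, 0);
}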
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
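vrndi maps to FRINTI, rounding each lane to an integral value using the current FPCR rounding mode (unlike the fixed-mode vrndn/vrndm/vrndp family), and vsqrt is a correctly rounded per-lane square root. Illustrative sketch only:

#include <arm_neon.h>

/* Round each lane to an integer under the current rounding mode,
   then take the per-lane square root. */
float16x8_t round_then_sqrt(float16x8_t v) {
  return vsqrtq_f16(vrndiq_f16(v));
}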
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
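That #endif closes the FP16 feature block. The trn/uzp/zip intrinsics above are pure lane permutes lowered directly to __builtin_shufflevector: trn interleaves matching even/odd lane pairs, uzp separates even lanes from odd lanes, and zip merges two vectors lane by lane. A sketch that deinterleaves packed (re, im) half floats (illustrative, not part of the header):

#include <arm_neon.h>

/* Split an interleaved complex stream into real and imaginary vectors. */
void split_complex(float16x8_t a, float16x8_t b,
                   float16x8_t *re, float16x8_t *im) {
  *re = vuzp1q_f16(a, b);  /* even lanes: real parts */
  *im = vuzp2q_f16(a, b);  /* odd lanes: imaginary parts */
}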
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
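vmmlaq_u32/vmmlaq_s32 treat the second operand as a 2x8 and the third as an 8x2 matrix of 8-bit elements, accumulating the 2x2 32-bit product into the first operand (UMMLA/SMMLA). A usage sketch, assuming a target with the i8mm extension (e.g. -march=armv8.6-a):

#include <arm_neon.h>

/* Accumulate a 2x8 by 8x2 unsigned byte matrix product into acc. */
uint32x4_t dot_block(uint32x4_t acc, uint8x16_t a, uint8x16_t b) {
  return vmmlaq_u32(acc, a, b);
}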
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int32x4_t __s0_305 = __p0_305; \
-  uint8x16_t __s1_305 = __p1_305; \
-  int8x8_t __s2_305 = __p2_305; \
-  int32x4_t __ret_305; \
-  int8x8_t __reint_305 = __s2_305; \
-  __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \
-  __ret_305; \
-})
-#else
-#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32x4_t __s0_306 = __p0_306; \
-  uint8x16_t __s1_306 = __p1_306; \
-  int8x8_t __s2_306 = __p2_306; \
-  int32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
-  uint8x16_t __rev1_306;  __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_306; \
-  int8x8_t __reint_306 = __rev2_306; \
-  __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \
-  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
-  __ret_306; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32x2_t __s0_307 = __p0_307; \
-  uint8x8_t __s1_307 = __p1_307; \
-  int8x8_t __s2_307 = __p2_307; \
-  int32x2_t __ret_307; \
-  int8x8_t __reint_307 = __s2_307; \
-  __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \
-  __ret_307; \
-})
-#else
-#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int32x2_t __s0_308 = __p0_308; \
-  uint8x8_t __s1_308 = __p1_308; \
-  int8x8_t __s2_308 = __p2_308; \
-  int32x2_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \
-  uint8x8_t __rev1_308;  __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_308; \
-  int8x8_t __reint_308 = __rev2_308; \
-  __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \
-  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \
-  __ret_308; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
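That #endif closes the __ARM_FEATURE_MATMUL_INT8 block. vusdot is the mixed-signedness 4-way dot product (USDOT): unsigned 8-bit data against signed 8-bit weights, the common layout for asymmetrically quantized inference kernels. Illustrative sketch:

#include <arm_neon.h>

/* Four 4-way dot products of u8 data against lane 0 of an s8 filter. */
int32x4_t quant_dot(int32x4_t acc, uint8x16_t data, int8x8_t filt) {
  return vusdotq_lane_s32(acc, data, filt, 0);
}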
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
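Note the pattern used above: vqrdmlah is expressed as vqadd(acc, vqrdmulh(a, b)), a saturating rounding doubling multiply-high followed by a saturating add, which is the classic Q15/Q31 fixed-point multiply-accumulate. A sketch of the Q15 case (illustrative; assumes an ARMv8.1 target with the QRDMX extension):

#include <arm_neon.h>

/* Q15 multiply-accumulate: acc + sat((a*b*2 + 0x8000) >> 16), saturated. */
int16x4_t q15_mla(int16x4_t acc, int16x4_t a, int16x4_t b) {
  return vqrdmlah_s16(acc, a, b);
}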
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int32x4_t __s0_309 = __p0_309; \
-  int32x4_t __s1_309 = __p1_309; \
-  int32x2_t __s2_309 = __p2_309; \
-  int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
-  __ret_309; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  int32x4_t __s0_310 = __p0_310; \
-  int32x4_t __s1_310 = __p1_310; \
-  int32x2_t __s2_310 = __p2_310; \
-  int32x4_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \
-  int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
-  int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
-  int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
-  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
-  __ret_310; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  int16x8_t __s0_311 = __p0_311; \
-  int16x8_t __s1_311 = __p1_311; \
-  int16x4_t __s2_311 = __p2_311; \
-  int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
-  __ret_311; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  int16x8_t __s0_312 = __p0_312; \
-  int16x8_t __s1_312 = __p1_312; \
-  int16x4_t __s2_312 = __p2_312; \
-  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
-  int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
-  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_312; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  int32x2_t __s0_313 = __p0_313; \
-  int32x2_t __s1_313 = __p1_313; \
-  int32x2_t __s2_313 = __p2_313; \
-  int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
-  __ret_313; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  int32x2_t __s0_314 = __p0_314; \
-  int32x2_t __s1_314 = __p1_314; \
-  int32x2_t __s2_314 = __p2_314; \
-  int32x2_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \
-  int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
-  int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
-  int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
-  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
-  __ret_314; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  int16x4_t __s0_315 = __p0_315; \
-  int16x4_t __s1_315 = __p1_315; \
-  int16x4_t __s2_315 = __p2_315; \
-  int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
-  __ret_315; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  int16x4_t __s0_316 = __p0_316; \
-  int16x4_t __s1_316 = __p1_316; \
-  int16x4_t __s2_316 = __p2_316; \
-  int16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
-  int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
-  int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
-  int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
-  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
-  __ret_316; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  int32x4_t __s0_317 = __p0_317; \
-  int32x4_t __s1_317 = __p1_317; \
-  int32x2_t __s2_317 = __p2_317; \
-  int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
-  __ret_317; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  int32x4_t __s0_318 = __p0_318; \
-  int32x4_t __s1_318 = __p1_318; \
-  int32x2_t __s2_318 = __p2_318; \
-  int32x4_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \
-  int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
-  int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
-  int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
-  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
-  __ret_318; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  int16x8_t __s0_319 = __p0_319; \
-  int16x8_t __s1_319 = __p1_319; \
-  int16x4_t __s2_319 = __p2_319; \
-  int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
-  __ret_319; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  int16x8_t __s0_320 = __p0_320; \
-  int16x8_t __s1_320 = __p1_320; \
-  int16x4_t __s2_320 = __p2_320; \
-  int16x8_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
-  int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
-  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_320; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  int32x2_t __s0_321 = __p0_321; \
-  int32x2_t __s1_321 = __p1_321; \
-  int32x2_t __s2_321 = __p2_321; \
-  int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
-  __ret_321; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  int32x2_t __s0_322 = __p0_322; \
-  int32x2_t __s1_322 = __p1_322; \
-  int32x2_t __s2_322 = __p2_322; \
-  int32x2_t __rev0_322;  __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \
-  int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
-  int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
-  int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
-  __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
-  __ret_322; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  int16x4_t __s0_323 = __p0_323; \
-  int16x4_t __s1_323 = __p1_323; \
-  int16x4_t __s2_323 = __p2_323; \
-  int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
-  __ret_323; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
-  int16x4_t __s0_324 = __p0_324; \
-  int16x4_t __s1_324 = __p1_324; \
-  int16x4_t __s2_324 = __p2_324; \
-  int16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
-  int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
-  int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
-  int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
-  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
-  __ret_324; \
-})
-#endif
-
-#endif
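That #endif closes the AArch64-only _laneq variants, which draw the multiplier from any lane of a full 128-bit vector; keeping all filter taps in one q register this way avoids reloads between steps of a loop. Illustrative sketch:

#include <arm_neon.h>

/* One filter step using tap 3 of a q-register of coefficients. */
int16x8_t taps_step(int16x8_t acc, int16x8_t x, int16x8_t taps) {
  return vqrdmlahq_laneq_s16(acc, x, taps, 3);
}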
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
-  __ret_325; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
-  __ret_326; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
-  __ret_327; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_328; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
-  int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
-  __ret_329; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
-  int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
-  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
-  __ret_330; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
-  int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
-  __ret_331; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
-  int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
-  int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
-  __ret_332; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
-  int32x4_t __s0_333 = __p0_333; \
-  int32x4_t __s1_333 = __p1_333; \
-  int32x4_t __s2_333 = __p2_333; \
-  int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
-  __ret_333; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
-  int32x4_t __s0_334 = __p0_334; \
-  int32x4_t __s1_334 = __p1_334; \
-  int32x4_t __s2_334 = __p2_334; \
-  int32x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
-  int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
-  int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
-  int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
-  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
-  __ret_334; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
-  int16x8_t __s0_335 = __p0_335; \
-  int16x8_t __s1_335 = __p1_335; \
-  int16x8_t __s2_335 = __p2_335; \
-  int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
-  __ret_335; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
-  int16x8_t __s0_336 = __p0_336; \
-  int16x8_t __s1_336 = __p1_336; \
-  int16x8_t __s2_336 = __p2_336; \
-  int16x8_t __rev0_336;  __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
-  __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_336; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
-  int32x2_t __s0_337 = __p0_337; \
-  int32x2_t __s1_337 = __p1_337; \
-  int32x4_t __s2_337 = __p2_337; \
-  int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
-  __ret_337; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
-  int32x2_t __s0_338 = __p0_338; \
-  int32x2_t __s1_338 = __p1_338; \
-  int32x4_t __s2_338 = __p2_338; \
-  int32x2_t __rev0_338;  __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \
-  int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
-  int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
-  int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
-  __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
-  __ret_338; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
-  int16x4_t __s0_339 = __p0_339; \
-  int16x4_t __s1_339 = __p1_339; \
-  int16x8_t __s2_339 = __p2_339; \
-  int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
-  __ret_339; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
-  int16x4_t __s0_340 = __p0_340; \
-  int16x4_t __s1_340 = __p1_340; \
-  int16x8_t __s2_340 = __p2_340; \
-  int16x4_t __rev0_340;  __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \
-  int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
-  int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
-  __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
-  __ret_340; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
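The scalar forms vabdd_f64 and vabds_f32 expose FABD on a single element, fusing what would otherwise be a subtract followed by fabs. Illustrative sketch (AArch64 only):

#include <arm_neon.h>

/* Absolute difference of two floats in one FABD instruction. */
float peak_error(float a, float b) {
  return vabds_f32(a, b);
}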
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1);
-  return __ret;
-}
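vaddq_p128 is carry-less polynomial addition, i.e. a 128-bit XOR; together with vmull_p64 it forms the GF(2^128) arithmetic used in GHASH-style hashing. Availability of poly128_t typically requires the crypto (PMULL) extension, so treat the target assumption here as illustrative:

#include <arm_neon.h>

/* Carry-less (XOR) addition of two 128-bit polynomials. */
poly128_t gf_add(poly128_t a, poly128_t b) {
  return vaddq_p128(a, b);
}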
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
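The vaddhn_high_* forms narrow the top half of each widened sum and pack it into the upper half of a 128-bit result, so the two halves of a widened computation re-pack without an explicit vcombine. Illustrative sketch:

#include <arm_neon.h>

/* Narrow (a + b) >> 16 for both halves of a widened computation,
   packing the results into one uint16x8_t. */
uint16x8_t pack_high_halves(uint32x4_t lo_a, uint32x4_t lo_b,
                            uint32x4_t hi_a, uint32x4_t hi_b) {
  uint16x4_t lo = vaddhn_u32(lo_a, lo_b);
  return vaddhn_high_u32(lo, hi_a, hi_b);
}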
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcgtzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vclezd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vcltzd_s64(int64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
-  __ret_357; \
-})
-#else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
-  __ret_358; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
-  __ret_359; \
-})
-#else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
-  __ret_360; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
-  __ret_361; \
-})
-#else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_362; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
-  __ret_363; \
-})
-#else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_364; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
-  __ret_365; \
-})
-#else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
-  __ret_366; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
-  __ret_367; \
-})
-#else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
-  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_368; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
-  __ret_369; \
-})
-#else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
-  __ret_370; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
-  __ret_371; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
-  __ret_372; \
-})
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
-  __ret_373; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
-  __ret_374; \
-})
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_375; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
-  __ret_376; \
-})
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
-  __ret_377; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
-  __ret_378; \
-})
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
-  __ret_379; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
-  __ret_381; \
-})
-#else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
-  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
-  __ret_382; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
-  __ret_383; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_384; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
-  __ret_385; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_386; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
-  __ret_387; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_388; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
-  __ret_389; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
-  __ret_390; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
-  __ret_391; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
-  __ret_392; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
-  __ret_393; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_394; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
-  __ret_395; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
-  __ret_397; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
-  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
-  __ret_398; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
-  __ret_399; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
-  __ret_400; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
-  __ret_401; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
-  __ret_402; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
-  __ret_403; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_404; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
-  __ret_405; \
-})
-#else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_406; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
-  __ret_407; \
-})
-#else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
-  __ret_408; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
-  __ret_409; \
-})
-#else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
-  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_410; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
-  __ret_411; \
-})
-#else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
-  __ret_412; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
-  __ret_413; \
-})
-#else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
-  __ret_414; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
-  __ret_415; \
-})
-#else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
-  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
-  __ret_416; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
-  __ret_417; \
-})
-#else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_418; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
-  __ret_419; \
-})
-#else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
-  __ret_420; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
-  __ret_421; \
-})
-#else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
-  __ret_422; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
-  __ret_423; \
-})
-#else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
-  __ret_424; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
-  __ret_425; \
-})
-#else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
-  __ret_426; \
-})
-#endif
-
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
-})
-#else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
-})
-#else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
-})
-#else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
-})
-#endif
-
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
-})
-#else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
-  __ret_453; \
-})
-#else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
-  __ret_454; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
-  __ret_455; \
-})
-#else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_456; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
-  __ret_457; \
-})
-#else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_458; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
-  __ret_459; \
-})
-#else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
-  __ret_460; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
-  __ret_461; \
-})
-#else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
-  __ret_462; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
-  __ret_463; \
-})
-#else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
-  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_464; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
-  __ret_465; \
-})
-#else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
-  __ret_466; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
-  __ret_467; \
-})
-#else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
-  __ret_468; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
-  __ret_469; \
-})
-#else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_470; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
-  __ret_471; \
-})
-#else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
-  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
-  __ret_473; \
-})
-#else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
-  __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
-  __ret_475; \
-})
-#else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
-  __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
-  __ret_477; \
-})
-#else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
-  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
-  __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
-  __ret_479; \
-})
-#else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
-  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
-  __ret_481; \
-})
-#else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
-  __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
-  __ret_483; \
-})
-#else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
-  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
-  __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
-  __ret_485; \
-})
-#else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
-  __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
-  __ret_487; \
-})
-#else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
-  __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
-  __ret_489; \
-})
-#else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
-  __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
-  __ret_491; \
-})
-#else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
-  __ret_492; \
-})
-#endif
-
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
-  __ret_509; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
-  __ret_510; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
-  __ret_511; \
-})
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
-  __ret_512; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
-})
-#else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
-  __ret_531; \
-})
-#else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
-  __ret_532; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
-  __ret_533; \
-})
-#else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
-  __ret_534; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
-  __ret_535; \
-})
-#else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
-  __ret_536; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
-  __ret_537; \
-})
-#else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
-  __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
-  __ret_538; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
-  __ret_539; \
-})
-#else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
-  __ret_540; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
-  __ret_541; \
-})
-#else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
-  __ret_542; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
-  __ret_543; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
-  __ret_544; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
-  __ret_545; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
-  __ret_546; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
-  int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
-  __ret_547; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
-  int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
-  int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
-  __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
-  __ret_548; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
-  int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
-  __ret_549; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
-  int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
-  __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
-  __ret_550; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
-  uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
-  uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
-  __ret_551; \
-})
-#else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
-  uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
-  uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
-  uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
-  __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
-  __ret_552; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
-  uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
-  uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
-  __ret_553; \
-})
-#else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
-  uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
-  uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
-  __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
-  __ret_554; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
-  int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
-  int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
-  __ret_555; \
-})
-#else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
-  int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
-  int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
-  int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
-  __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
-  __ret_556; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
-  int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
-  int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
-  __ret_557; \
-})
-#else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
-  int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
-  int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
-  __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
-  __ret_558; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
-  __ret_575; \
-})
-#else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
-  __ret_576; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
-  __ret_577; \
-})
-#else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
-  __ret_578; \
-})
-#endif
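(Note on the pattern above: every vector intrinsic in this header is emitted twice. Under __LITTLE_ENDIAN__ the body operates on the arguments directly; the big-endian body first reverses the lanes with __builtin_shufflevector, performs the identical computation, and reverses the result back, so lane indices always refer to the architectural lane order. A minimal usage sketch of the multiply-subtract-by-lane family, assuming an AArch64 compiler; main() and the test values are illustrative, not part of the diff:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  uint32_t bv[4] = {1, 2, 3, 4};
  uint32_t vv[4] = {10, 20, 30, 40};
  uint32x4_t a = vdupq_n_u32(100);   /* accumulator        */
  uint32x4_t b = vld1q_u32(bv);      /* multiplicand       */
  uint32x4_t v = vld1q_u32(vv);      /* lane source        */
  /* d[i] = a[i] - b[i] * v[2], i.e. 100 - {1,2,3,4} * 30
   * -> {70, 40, 10, 0xFFFFFFEC} (u32 arithmetic wraps)   */
  uint32x4_t d = vmlsq_laneq_u32(a, b, v, 2);
  printf("%u %u %u %u\n",
         vgetq_lane_u32(d, 0), vgetq_lane_u32(d, 1),
         vgetq_lane_u32(d, 2), vgetq_lane_u32(d, 3));
  return 0;
}
)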
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
-  __ret_579; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
-  __ret_580; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
-  __ret_581; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
-  __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
-  __ret_582; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
-  __ret_583; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
-  __ret_584; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
-  __ret_585; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
-  __ret_586; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
-  __ret_587; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
-  __ret_588; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
-  __ret_589; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
-  __ret_590; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
-  int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
-  __ret_591; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
-  int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
-  int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
-  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
-  __ret_592; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
-  int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
-  __ret_593; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
-  int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
-  __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
-  __ret_594; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
-  uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
-  uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
-  __ret_595; \
-})
-#else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
-  uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
-  uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
-  uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
-  __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
-  __ret_596; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
-  uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
-  uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
-  __ret_597; \
-})
-#else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
-  uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
-  uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
-  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
-  __ret_598; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
-  int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
-  int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
-  __ret_599; \
-})
-#else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
-  int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
-  int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
-  int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
-  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
-  __ret_600; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
-  int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
-  int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
-  __ret_601; \
-})
-#else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
-  int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
-  int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
-  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
-  __ret_602; \
-})
-#endif
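(The vmlsl_* variants above are the widening form: the product is computed at double width before the subtraction, and the _high variants draw the multiplicand from the upper half of a 128-bit vector. A short sketch under the same AArch64 assumption; the helper name is invented:

#include <arm_neon.h>

uint64x2_t widening_mls(uint64x2_t acc, uint32x4_t b, uint32x2_t v) {
  /* acc[i] - (uint64_t)vget_high_u32(b)[i] * v[1],
   * exactly what the macro above expands to */
  return vmlsl_high_lane_u32(acc, b, v, 1);
}
)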
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
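(vmov_n_* and vmovq_n_* broadcast one scalar into every lane; the trailing lane reversal in the big-endian body is a no-op on a splat. A one-line sketch, function name invented; as far as I can tell this is the same operation as vdupq_n_f64:

#include <arm_neon.h>

float64x2_t splat_pi(void) {
  return vmovq_n_f64(3.141592653589793);  /* {pi, pi} */
}
)
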
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
-  return __ret_619;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
-  return __ret_620;
-}
-#endif
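(The vmovl_high_* group above widens the upper half of a 128-bit vector; per the bodies shown, it is implemented as a shift-left-long by 0 applied to vget_high. An illustrative helper, assuming an AArch64 target:

#include <arm_neon.h>

uint16x8_t widen_high_bytes(uint8x16_t x) {
  /* zero-extend the upper eight bytes of x to 16 bits;
   * equivalent to vmovl_u8(vget_high_u8(x)) */
  return vmovl_high_u8(x);
}
)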
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
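(vmovn_high_* is the inverse direction: it narrows its second argument and appends the result to an already-narrowed low half, i.e. vcombine of the first argument with vmovn of the second, as the bodies above show. A sketch with an invented helper name:

#include <arm_neon.h>

uint16x8_t narrow_two(uint32x4_t lo, uint32x4_t hi) {
  uint16x4_t low_half = vmovn_u32(lo);   /* narrow the first vector */
  return vmovn_high_u32(low_half, hi);   /* append narrowed 'hi'    */
}
)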
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
-})
-#else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
-})
-#else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
-})
-#else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
-})
-#else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
-  __ret_646; \
-})
-#else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
-  __ret_647; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
-  __ret_648; \
-})
-#else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
-  __ret_649; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
-  __ret_650; \
-})
-#else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
-  __ret_651; \
-})
-#endif
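(The vmul[q]_lane[q] family above scales every element of one vector by a single lane of another, via the splat helpers. A minimal sketch, assuming AArch64; the function name is illustrative:

#include <arm_neon.h>

float32x4_t scale_by_lane(float32x4_t row, float32x4_t coeffs) {
  /* row * coeffs[3], broadcast to all lanes by splatq_laneq_f32 */
  return vmulq_laneq_f32(row, coeffs, 3);
}
)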
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
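(vmull_high_* multiplies the upper halves of two 128-bit vectors into a full-width result, saving the explicit vget_high of the plain vmull form. A sketch under the same AArch64 assumption, helper name invented:

#include <arm_neon.h>

uint32x4_t mul_upper(uint16x8_t a, uint16x8_t b) {
  /* equals vmull_u16(vget_high_u16(a), vget_high_u16(b)) */
  return vmull_high_u16(a, b);
}
)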
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
-})
-#else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
-})
-#else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
-})
-#else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
-})
-#else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
-})
-#else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
-})
-#else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
-})
-#else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
-})
-#else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
-})
-#else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
-})
-#else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
-})
-#else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
-})
-#else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
-  __ret_692; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
-  __ret_693; \
-})
-#else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
-  __ret_694; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
-})
-#else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
-})
-#else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint16_t vqmovuns_s32(int32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovund_s64(int64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovunh_s16(int16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
-})
-#else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
-})
-#else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
-})
-#else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
-})
-#else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
-})
-#else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
-})
-#else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrsqrted_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrsqrtes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
-  return __ret;
-}
-#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
-  __ret; \
-})
-__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
-})
-#else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
-})
-#else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
-})
-#else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
-})
-#else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
-})
-#else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
-})
-#else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
-  uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
-  __ret_815; \
-})
-#else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
-  uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
-  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_816; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
-  __ret_817; \
-})
-#else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
-  __ret_818; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
-  __ret_819; \
-})
-#else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_820; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
-  int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
-  __ret_821; \
-})
-#else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
-  int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
-  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_822; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
-  __ret_823; \
-})
-#else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
-  __ret_824; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
-  __ret_825; \
-})
-#else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_826; \
-})
-#endif
-
-#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vst1_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
-})
-#else
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
-})
-#else
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
-})
-#endif
-
-#define vst1_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
-})
-#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-})
-#else
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-})
-#else
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-})
-#endif
-
-#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-})
-#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst2_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#define vst2_f64(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-})
-#else
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-})
-#else
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-})
-#else
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-})
-#else
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-})
-#else
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-})
-#else
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-})
-#else
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
-})
-#endif
-
-#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-})
-#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-})
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-})
-#define vst3_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#define vst3_f64(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-})
-#else
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-})
-#else
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-})
-#else
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-})
-#else
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-})
-#else
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-})
-#else
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-})
-#else
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
-})
-#endif
-
-#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-})
-#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-})
-#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-})
-#define vst4_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#define vst4_f64(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-})
-#else
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-})
-#else
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-})
-#else
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-})
-#else
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-})
-#else
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-})
-#else
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-})
-#else
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
-})
-#endif
-
-#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-})
-#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-})
-#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-})
-#define vstrq_p128(__p0, __p1) __extension__ ({ \
-  poly128_t __s1 = __p1; \
-  __builtin_neon_vstrq_p128(__p0, __s1); \
-})
-__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
-})
-#else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
-})
-#else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
-})
-#else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
-})
-#else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
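The functions removed above implement the AArch64 vuzp1 ("unzip 1") family: they de-interleave two vectors by keeping the even-numbered lanes of the pair (__p0, __p1). The #else branches reverse lane order before and after the shuffle so big-endian builds observe the same lane numbering as little-endian ones. A minimal sketch of the effect (the helper name and lane values are illustrative, not from this header):

#include <arm_neon.h>
/* De-interleave: vuzp1 keeps the even-numbered lanes of (a, b). */
static int32x4_t uzp1_even_lanes(int32x4_t a, int32x4_t b) {
  /* For a = {0,1,2,3} and b = {4,5,6,7} this yields {0, 2, 4, 6}. */
  return vuzp1q_s32(a, b);
}
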
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
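The vuzp2 family above is the complementary half of the unzip: it keeps the odd-numbered lanes of (__p0, __p1), using the same reverse/shuffle/reverse pattern on big-endian targets. Illustrative sketch (helper name invented):

#include <arm_neon.h>
/* De-interleave: vuzp2 keeps the odd-numbered lanes of (a, b). */
static int32x4_t uzp2_odd_lanes(int32x4_t a, int32x4_t b) {
  /* For a = {0,1,2,3} and b = {4,5,6,7} this yields {1, 3, 5, 7}. */
  return vuzp2q_s32(a, b);
}
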
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
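The vzip1 functions above interleave the low halves of the two inputs, producing {a0, b0, a1, b1, ...}; the vzip2 family that follows does the same for the high halves. A sketch under the same conventions as before (helper name illustrative):

#include <arm_neon.h>
/* Interleave: vzip1 zips together the low halves of a and b. */
static int32x4_t zip1_low_halves(int32x4_t a, int32x4_t b) {
  /* For a = {0,1,2,3} and b = {4,5,6,7} this yields {0, 4, 1, 5}. */
  return vzip1q_s32(a, b);
}
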
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
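The bare #endif above closes an enclosing guard whose opening #if lies before this hunk excerpt; in these generated headers the vuzp/vzip lane-permute intrinsics are typically confined to an AArch64-only region, so that is most likely the guard being closed here, though it is not visible in this excerpt. The vaba family that follows computes an accumulating absolute difference. Sketch (helper name illustrative):

#include <arm_neon.h>
/* Accumulating absolute difference: r[i] = acc[i] + |x[i] - y[i]|. */
static uint32x4_t aba_example(uint32x4_t acc, uint32x4_t x, uint32x4_t y) {
  return vabaq_u32(acc, x, y);
}
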
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + vabdq_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdq_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdq_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + vabdq_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdq_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdq_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + vabd_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + vabd_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + vabd_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + vabd_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + vabd_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + vabd_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
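The vaba block above adds |__p1 - __p2| into __p0 lane-wise, delegating to the vabd/vabdq intrinsics. The vabdl functions that follow are the widening form, and their big-endian branches also define __noswap_ variants that skip the lane reversals so other big-endian wrappers can compose them without double-swapping. One detail worth noting in the removed code: the signed variants widen through vmovl_u8/u16/u32, i.e. zero-extension, which is deliberate, as the sketch below explains (helper name illustrative):

#include <arm_neon.h>
/* Widening absolute difference: each 8-bit |x[i] - y[i]| is zero-extended
 * to 16 bits. For the signed variants the cast through uint8x8_t is
 * deliberate: |a - b| for two int8 values lies in [0, 255], so the 8-bit
 * result must be zero-extended, not sign-extended, to be exact. */
static uint16x8_t abdl_example(uint8x8_t x, uint8x8_t y) {
  return vabdl_u8(x, y);
}
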
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#endif
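
(Context, not part of the diff: the vabdl_* block above is the widening absolute-difference family; each lane computes |a[i] - b[i]| and widens it, so u8 inputs yield u16 results with no saturation. Every intrinsic in this generated header follows the same shape: a plain little-endian body, a big-endian body that lane-reverses its inputs with __builtin_shufflevector, calls __noswap_* helpers, and reverses the result back, plus a __noswap_* variant so that composed intrinsics do not double-reverse. A minimal usage sketch, assuming an AArch64 NEON target; the buffers and values are invented for illustration:)

/* Illustrative only: sum of absolute differences via vabdl_u8. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    const uint8_t a[8] = {10, 200, 30, 40, 50, 60, 70, 80};
    const uint8_t b[8] = {12, 100, 33, 44, 40, 66, 60, 90};
    /* |a - b| per lane, widened to 16 bits so nothing saturates. */
    uint16x8_t diff = vabdl_u8(vld1_u8(a), vld1_u8(b));
    uint16_t out[8];
    vst1q_u16(out, diff);
    unsigned sad = 0;
    for (int i = 0; i < 8; ++i) sad += out[i];
    printf("SAD = %u\n", sad);  /* 2+100+3+4+10+6+10+10 = 145 */
    return 0;
}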
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
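
(Context: vaddl_* widens both operands before adding, so narrow lanes cannot wrap modulo the element size. A hedged one-liner, illustrative only:)

#include <arm_neon.h>

/* 200 + 100 stays 300 in the u16 result; plain vadd_u8 would give 44. */
uint16x8_t add_no_wrap(uint8x8_t x, uint8x8_t y) {
    return vaddl_u8(x, y);
}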
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
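
(Context: vaddw_* adds a narrow vector into an already-widened accumulator; combined with vaddl_* it gives running sums without intermediate narrowing. Sketch under the assumption of an 8-bytes-per-row layout; the function name and bounds are invented:)

#include <arm_neon.h>

uint16x8_t sum_rows(const uint8_t *p, int rows) {
    uint16x8_t acc = vdupq_n_u16(0);
    for (int r = 0; r < rows; ++r)
        acc = vaddw_u8(acc, vld1_u8(p + 8 * r)); /* acc[i] += widen(row[i]) */
    return acc;  /* up to 257 rows of 0xFF sum to 65535 without wrapping */
}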
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
-})
-#else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
-})
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
-})
-#else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
-})
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
-})
-#endif
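
(Context: the f16 lane reads above are macros that reinterpret through int16, via the __reint temporaries, because float16_t has historically been a storage-only type on some targets; the _835/_836/... suffixes are per-expansion counters that keep the macro-local names unique. Minimal sketch, assuming a target where float16_t is usable:)

#include <arm_neon.h>

float16_t third_lane(float16x4_t v) {
    return vget_lane_f16(v, 2);  /* lane index must be a compile-time constant */
}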
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
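
(Context: vmlal_* is the widening multiply-accumulate, acc[i] + widen(p1[i]) * widen(p2[i]); the __noswap_ form exists so other big-endian bodies can compose it on already-reversed operands. Illustrative sketch:)

#include <arm_neon.h>

int32x4_t macc(int32x4_t acc, int16x4_t a, int16x4_t b) {
    return vmlal_s16(acc, a, b);  /* acc[i] += (int32_t)a[i] * b[i] */
}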
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
-})
-#else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
-})
-#else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
-})
-#else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
-})
-#else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
-})
-#endif
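
(Context: the _lane forms broadcast one lane of the last operand via splat_lane_* before the widening multiply, which suits small filter or matrix kernels where one coefficient multiplies a whole row. Sketch; the lane index must be a compile-time constant in range:)

#include <arm_neon.h>

int32x4_t row_step(int32x4_t acc, int16x4_t row, int16x4_t coeffs) {
    /* acc[i] += (int32_t)row[i] * coeffs[1] */
    return vmlal_lane_s16(acc, row, coeffs, 1);
}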
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
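
(Context: the _n forms splat a scalar with a compound literal, e.g. (int16x4_t){p2, p2, p2, p2}, so unlike the _lane forms the multiplier need not be a constant. Sketch:)

#include <arm_neon.h>

int32x4_t scale_acc(int32x4_t acc, int16x4_t v, int16_t k) {
    return vmlal_n_s16(acc, v, k);  /* acc[i] += (int32_t)v[i] * k */
}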
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
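
(Context: vmlsl_* is the subtracting twin of vmlal_*, acc - widen(p1) * widen(p2); the pair implements, for example, the real part of a fixed-point complex multiply-accumulate. Sketch with an invented helper name; the _lane and _n variants that follow mirror the vmlal ones above exactly:)

#include <arm_neon.h>

/* Re(a * b) accumulation: acc += ar*br - ai*bi, widened to 32 bits. */
int32x4_t cmul_re(int32x4_t acc, int16x4_t ar, int16x4_t ai,
                  int16x4_t br, int16x4_t bi) {
    acc = vmlal_s16(acc, ar, br);
    return vmlsl_s16(acc, ai, bi);
}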
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
-})
-#else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
-})
-#else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
-})
-#else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
-})
-#else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
-})
-#else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
-})
-#else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
-})
-#endif
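
(Context: vset_lane_f16/vsetq_lane_f16 mirror the f16 lane reads further up, inserting one value through an int16 view of the vector; note that the scalar comes first in the argument list. Sketch:)

#include <arm_neon.h>

float16x4_t set0(float16x4_t v, float16_t x) {
    return vset_lane_f16(x, v, 0);  /* scalar first, vector second, constant lane */
}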
-
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
-})
-#else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
-})
-#else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
-})
-#else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
-})
-#else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_high_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
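
(Context: the block that just closed, guarded by __ARM_FEATURE_BF16_VECTOR_ARITHMETIC, holds the bfloat16 helpers: vbfmlalbq/vbfmlaltq accumulate products of even/odd bf16 lanes into f32, and vcvtq_low/high_f32_bf16 widen one half of a bf16x8 vector. Sketch, assuming a bf16-capable compiler and target; everything here is illustrative:)

#include <arm_neon.h>

void widen_bf16(bfloat16x8_t v, float32x4_t *lo, float32x4_t *hi) {
    *lo = vcvtq_low_f32_bf16(v);   /* lanes 0..3 */
    *hi = vcvtq_high_f32_bf16(v);  /* lanes 4..7 */
}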
-#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
-  float32x4_t __s0_881 = __p0_881; \
-  float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
-  __ret_881; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
-  float32x4_t __s0_882 = __p0_882; \
-  float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
-  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
-  float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
-  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
-  __ret_882; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
-  float32x2_t __s0_883 = __p0_883; \
-  float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
-  __ret_883; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
-  float32x2_t __s0_884 = __p0_884; \
-  float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
-  float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
-  float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
-  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
-  __ret_884; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
-  float32x4_t __s0_885 = __p0_885; \
-  float16x8_t __s1_885 = __p1_885; \
-  float16x4_t __s2_885 = __p2_885; \
-  float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
-  __ret_885; \
-})
-#else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
-  float32x4_t __s0_886 = __p0_886; \
-  float16x8_t __s1_886 = __p1_886; \
-  float16x4_t __s2_886 = __p2_886; \
-  float32x4_t __rev0_886;  __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \
-  float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
-  float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
-  __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
-  __ret_886; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
-  float32x2_t __s0_887 = __p0_887; \
-  float16x4_t __s1_887 = __p1_887; \
-  float16x4_t __s2_887 = __p2_887; \
-  float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
-  __ret_887; \
-})
-#else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
-  float32x2_t __s0_888 = __p0_888; \
-  float16x4_t __s1_888 = __p1_888; \
-  float16x4_t __s2_888 = __p2_888; \
-  float32x2_t __rev0_888;  __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \
-  float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
-  float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
-  float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
-  __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
-  __ret_888; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
-  float32x4_t __s0_889 = __p0_889; \
-  float16x8_t __s1_889 = __p1_889; \
-  float16x4_t __s2_889 = __p2_889; \
-  float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
-  __ret_889; \
-})
-#else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
-  float32x4_t __s0_890 = __p0_890; \
-  float16x8_t __s1_890 = __p1_890; \
-  float16x4_t __s2_890 = __p2_890; \
-  float32x4_t __rev0_890;  __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \
-  float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
-  float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
-  __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
-  __ret_890; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
-  float32x2_t __s0_891 = __p0_891; \
-  float16x4_t __s1_891 = __p1_891; \
-  float16x4_t __s2_891 = __p2_891; \
-  float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
-  __ret_891; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
-  float32x2_t __s0_892 = __p0_892; \
-  float16x4_t __s1_892 = __p1_892; \
-  float16x4_t __s2_892 = __p2_892; \
-  float32x2_t __rev0_892;  __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \
-  float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
-  float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
-  float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
-  __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
-  __ret_892; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
-  float32x4_t __s0_893 = __p0_893; \
-  float16x8_t __s1_893 = __p1_893; \
-  float16x8_t __s2_893 = __p2_893; \
-  float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
-  __ret_893; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
-  float32x4_t __s0_894 = __p0_894; \
-  float16x8_t __s1_894 = __p1_894; \
-  float16x8_t __s2_894 = __p2_894; \
-  float32x4_t __rev0_894;  __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \
-  float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
-  __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
-  __ret_894; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
-  float32x2_t __s0_895 = __p0_895; \
-  float16x4_t __s1_895 = __p1_895; \
-  float16x8_t __s2_895 = __p2_895; \
-  float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
-  __ret_895; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
-  float32x2_t __s0_896 = __p0_896; \
-  float16x4_t __s1_896 = __p1_896; \
-  float16x8_t __s2_896 = __p2_896; \
-  float32x2_t __rev0_896;  __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \
-  float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
-  float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
-  __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
-  __ret_896; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
-  float32x4_t __s0_897 = __p0_897; \
-  float16x8_t __s1_897 = __p1_897; \
-  float16x8_t __s2_897 = __p2_897; \
-  float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
-  __ret_897; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
-  float32x4_t __s0_898 = __p0_898; \
-  float16x8_t __s1_898 = __p1_898; \
-  float16x8_t __s2_898 = __p2_898; \
-  float32x4_t __rev0_898;  __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \
-  float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
-  __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
-  __ret_898; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
-  float32x2_t __s0_899 = __p0_899; \
-  float16x4_t __s1_899 = __p1_899; \
-  float16x8_t __s2_899 = __p2_899; \
-  float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
-  __ret_899; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
-  float32x2_t __s0_900 = __p0_900; \
-  float16x4_t __s1_900 = __p1_900; \
-  float16x8_t __s2_900 = __p2_900; \
-  float32x2_t __rev0_900;  __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \
-  float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
-  float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
-  __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
-  __ret_900; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
-  __ret_901; \
-})
-#else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
-  __ret_902; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
-  __ret_903; \
-})
-#else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
-  __ret_904; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-  uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
-  __ret_905; \
-})
-#else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-  uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
-  __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
-  __ret_906; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-  uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
-  __ret_907; \
-})
-#else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-  uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
-  __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
-  __ret_908; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
-  __ret_909; \
-})
-#else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
-  __ret_910; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
-  __ret_911; \
-})
-#else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
-  __ret_912; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
-  __ret_913; \
-})
-#else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
-  __ret_914; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
-  __ret_915; \
-})
-#else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
-  __ret_916; \
-})
-#endif
-
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
-  __ret_917; \
-})
-#else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
-  __ret_918; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
-  __ret_919; \
-})
-#else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
-  __ret_920; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
-  __ret_921; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
-  __ret_922; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
-  __ret_923; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
-  __ret_924; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
-  poly64x2_t __s0_925 = __p0_925; \
-  poly64x1_t __s2_925 = __p2_925; \
-  poly64x2_t __ret_925; \
-  __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \
-  __ret_925; \
-})
-#else
-#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
-  poly64x2_t __s0_926 = __p0_926; \
-  poly64x1_t __s2_926 = __p2_926; \
-  poly64x2_t __rev0_926;  __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \
-  poly64x2_t __ret_926; \
-  __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \
-  __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \
-  __ret_926; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
-  float64x2_t __s0_927 = __p0_927; \
-  float64x1_t __s2_927 = __p2_927; \
-  float64x2_t __ret_927; \
-  __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \
-  __ret_927; \
-})
-#else
-#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
-  float64x2_t __s0_928 = __p0_928; \
-  float64x1_t __s2_928 = __p2_928; \
-  float64x2_t __rev0_928;  __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \
-  float64x2_t __ret_928; \
-  __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \
-  __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \
-  __ret_928; \
-})
-#endif
-
-#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
-  poly64x1_t __s0_929 = __p0_929; \
-  poly64x1_t __s2_929 = __p2_929; \
-  poly64x1_t __ret_929; \
-  __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \
-  __ret_929; \
-})
-#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
-  float64x1_t __s0_930 = __p0_930; \
-  float64x1_t __s2_930 = __p2_930; \
-  float64x1_t __ret_930; \
-  __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \
-  __ret_930; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
-  poly64x2_t __s0_931 = __p0_931; \
-  poly64x2_t __s2_931 = __p2_931; \
-  poly64x2_t __ret_931; \
-  __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \
-  __ret_931; \
-})
-#else
-#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
-  poly64x2_t __s0_932 = __p0_932; \
-  poly64x2_t __s2_932 = __p2_932; \
-  poly64x2_t __rev0_932;  __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \
-  poly64x2_t __rev2_932;  __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \
-  poly64x2_t __ret_932; \
-  __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \
-  __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \
-  __ret_932; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
-  float64x2_t __s0_933 = __p0_933; \
-  float64x2_t __s2_933 = __p2_933; \
-  float64x2_t __ret_933; \
-  __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \
-  __ret_933; \
-})
-#else
-#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \
-  float64x2_t __s0_934 = __p0_934; \
-  float64x2_t __s2_934 = __p2_934; \
-  float64x2_t __rev0_934;  __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \
-  float64x2_t __rev2_934;  __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \
-  float64x2_t __ret_934; \
-  __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \
-  __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \
-  __ret_934; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \
-  poly64x1_t __s0_935 = __p0_935; \
-  poly64x2_t __s2_935 = __p2_935; \
-  poly64x1_t __ret_935; \
-  __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \
-  __ret_935; \
-})
-#else
-#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \
-  poly64x1_t __s0_936 = __p0_936; \
-  poly64x2_t __s2_936 = __p2_936; \
-  poly64x2_t __rev2_936;  __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \
-  poly64x1_t __ret_936; \
-  __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \
-  __ret_936; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \
-  float64x1_t __s0_937 = __p0_937; \
-  float64x2_t __s2_937 = __p2_937; \
-  float64x1_t __ret_937; \
-  __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \
-  __ret_937; \
-})
-#else
-#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \
-  float64x1_t __s0_938 = __p0_938; \
-  float64x2_t __s2_938 = __p2_938; \
-  float64x2_t __rev2_938;  __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \
-  float64x1_t __ret_938; \
-  __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \
-  __ret_938; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \
-  float64x1_t __s0_939 = __p0_939; \
-  float64x1_t __s1_939 = __p1_939; \
-  float64x1_t __ret_939; \
-  float64_t __x_939 = vget_lane_f64(__s0_939, 0); \
-  float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \
-  float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \
-  __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \
-  __ret_939; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \
-  float64x1_t __s0_940 = __p0_940; \
-  float64x2_t __s1_940 = __p1_940; \
-  float64x1_t __ret_940; \
-  float64_t __x_940 = vget_lane_f64(__s0_940, 0); \
-  float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \
-  float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \
-  __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \
-  __ret_940; \
-})
-#else
-#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \
-  float64x1_t __s0_941 = __p0_941; \
-  float64x2_t __s1_941 = __p1_941; \
-  float64x2_t __rev1_941;  __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \
-  float64x1_t __ret_941; \
-  float64_t __x_941 = vget_lane_f64(__s0_941, 0); \
-  float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \
-  float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \
-  __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \
-  __ret_941; \
-})
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* if !defined(__ARM_NEON) */
-#endif /* ifndef __ARM_FP */
-#endif /* __ARM_NEON_H */
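A minimal usage sketch of the widening multiply-subtract and absolute-difference-accumulate intrinsics removed above, assuming an AArch64 target with <arm_neon.h>; the wrapper function names here are hypothetical and not part of the header:

/* Build with: clang --target=aarch64-linux-gnu -c example.c */
#include <arm_neon.h>

/* acc[i] -= a.high[i] * b.high[i], widening int16 -> int32 (vmlsl_high_s16). */
int32x4_t mls_high(int32x4_t acc, int16x8_t a, int16x8_t b) {
  return vmlsl_high_s16(acc, a, b);
}

/* acc[i] += |a[i] - b[i]|, widening uint8 -> uint16 (vabal_u8). */
uint16x8_t abs_diff_acc(uint16x8_t acc, uint8x8_t a, uint8x8_t b) {
  return vabal_u8(acc, a, b);
}

The _high variants fold the vget_high step into the operation, which is why the big-endian paths above reverse lanes first and then call the __noswap_ helpers on the reversed vectors.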
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
deleted file mode 100644
index 6aee8ae..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ /dev/null
@@ -1,2024 +0,0 @@
-/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512BWINTRIN_H
-#define __AVX512BWINTRIN_H
-
-typedef unsigned int __mmask32;
-typedef unsigned long long __mmask64;
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
-
-static __inline __mmask32 __DEFAULT_FN_ATTRS
-_knot_mask32(__mmask32 __M)
-{
-  return __builtin_ia32_knotsi(__M);
-}
-
-static __inline __mmask64 __DEFAULT_FN_ATTRS
-_knot_mask64(__mmask64 __M)
-{
-  return __builtin_ia32_knotdi(__M);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kand_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kand_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kandn_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kandn_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxnor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxnor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kadd_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kadd_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
-}
-
-#define _kshiftli_mask32(A, I) \
-  ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask32(A, I) \
-  ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I)))
-
-#define _kshiftli_mask64(A, I) \
-  ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask64(A, I) \
-  ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask32_u32(__mmask32 __A) {
-  return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_cvtmask64_u64(__mmask64 __A) {
-  return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_cvtu32_mask32(unsigned int __A) {
-  return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_cvtu64_mask64(unsigned long long __A) {
-  return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_load_mask32(__mmask32 *__A) {
-  return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_load_mask64(__mmask64 *__A) {
-  return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask32(__mmask32 *__A, __mmask32 __B) {
-  *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask64(__mmask64 *__A, __mmask64 __B) {
-  *(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B);
-}
-
-/* Integer compare */
-
-#define _mm512_cmp_epi8_mask(a, b, p) \
-  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)-1))
-
-#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
-  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)(m)))
-
-#define _mm512_cmp_epu8_mask(a, b, p) \
-  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                           (__v64qi)(__m512i)(b), (int)(p), \
-                                           (__mmask64)-1))
-
-#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
-  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                           (__v64qi)(__m512i)(b), (int)(p), \
-                                           (__mmask64)(m)))
-
-#define _mm512_cmp_epi16_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)-1))
-
-#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)(m)))
-
-#define _mm512_cmp_epu16_mask(a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                           (__v32hi)(__m512i)(b), (int)(p), \
-                                           (__mmask32)-1))
-
-#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
-  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                           (__v32hi)(__m512i)(b), (int)(p), \
-                                           (__mmask32)(m)))
-
-#define _mm512_cmpeq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A + (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A - (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A + (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A - (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A * (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-              (__v64qi) __W,
-              (__v64qi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-              (__v32hi) __W,
-              (__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi8 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi16 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
-          __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_shuffle_epi8(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
-                                                 (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-       __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epu16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
-                          __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_madd_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)_mm256_setzero_si256(),
-               (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)__O,
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi) _mm256_setzero_si256(),
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_undefined_si256(),
-              (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) __O,
-              __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_setzero_si256(),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          8,  64+8,   9, 64+9,
-                                          10, 64+10, 11, 64+11,
-                                          12, 64+12, 13, 64+13,
-                                          14, 64+14, 15, 64+15,
-                                          24, 64+24, 25, 64+25,
-                                          26, 64+26, 27, 64+27,
-                                          28, 64+28, 29, 64+29,
-                                          30, 64+30, 31, 64+31,
-                                          40, 64+40, 41, 64+41,
-                                          42, 64+42, 43, 64+43,
-                                          44, 64+44, 45, 64+45,
-                                          46, 64+46, 47, 64+47,
-                                          56, 64+56, 57, 64+57,
-                                          58, 64+58, 59, 64+59,
-                                          60, 64+60, 61, 64+61,
-                                          62, 64+62, 63, 64+63);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          4,  32+4,   5, 32+5,
-                                          6,  32+6,   7, 32+7,
-                                          12, 32+12, 13, 32+13,
-                                          14, 32+14, 15, 32+15,
-                                          20, 32+20, 21, 32+21,
-                                          22, 32+22, 23, 32+23,
-                                          28, 32+28, 29, 32+29,
-                                          30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          0,  64+0,   1, 64+1,
-                                          2,  64+2,   3, 64+3,
-                                          4,  64+4,   5, 64+5,
-                                          6,  64+6,   7, 64+7,
-                                          16, 64+16, 17, 64+17,
-                                          18, 64+18, 19, 64+19,
-                                          20, 64+20, 21, 64+21,
-                                          22, 64+22, 23, 64+23,
-                                          32, 64+32, 33, 64+33,
-                                          34, 64+34, 35, 64+35,
-                                          36, 64+36, 37, 64+37,
-                                          38, 64+38, 39, 64+39,
-                                          48, 64+48, 49, 64+49,
-                                          50, 64+50, 51, 64+51,
-                                          52, 64+52, 53, 64+53,
-                                          54, 64+54, 55, 64+55);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          0,  32+0,   1, 32+1,
-                                          2,  32+2,   3, 32+3,
-                                          8,  32+8,   9, 32+9,
-                                          10, 32+10, 11, 32+11,
-                                          16, 32+16, 17, 32+17,
-                                          18, 32+18, 19, 32+19,
-                                          24, 32+24, 25, 32+25,
-                                          26, 32+26, 27, 32+27);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi16(__m256i __A)
-{
-  /* This function always performs a signed extension, but __v32qi is a
-     vector of plain char, which may be signed or unsigned depending on the
-     target and compiler flags, so use __v32qs (signed char) instead. */
-  return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
-}
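-
-/* Illustrative sketch (not part of the original header): if plain char were
-   unsigned (e.g. under -funsigned-char), converting through __v32qi would
-   zero-extend, turning the byte 0x80 into 0x0080 rather than 0xFF80; the
-   __v32qs cast above forces the intended sign extension. */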
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi16(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_shufflehi_epi16(A, imm) \
-  ((__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm)))
-
-#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)(__m512i)(W)))
-
-#define _mm512_maskz_shufflehi_epi16(U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)_mm512_setzero_si512()))
-
-#define _mm512_shufflelo_epi16(A, imm) \
-  ((__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm)))
-
-#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)(__m512i)(W)))
-
-#define _mm512_maskz_shufflelo_epi16(U, A, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                       (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                       (imm)), \
-                                       (__v32hi)_mm512_setzero_si512()))
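-
-/* The shufflehi/shufflelo immediate uses the same two-bits-per-word encoding
-   as the SSE2 _mm_shufflehi_epi16/_mm_shufflelo_epi16 forms, applied
-   independently within each 128-bit lane of the 512-bit vector. For example
-   (illustrative), _mm512_shufflelo_epi16(a, 0x1B) reverses the low four
-   words of every lane, since 0x1B encodes the element order 3,2,1,0. */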
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
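-
-/* For the variable-count shifts (sllv here, and the srlv/srav forms below),
-   each word is shifted by the count in the corresponding element of __B.
-   Per the VPSLLVW/VPSRLVW semantics, logical shifts by counts greater than
-   15 produce zero; the arithmetic VPSRAVW form instead fills the element
-   with copies of the sign bit. */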
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bslli_epi128(a, imm) \
-  ((__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bsrli_epi128(a, imm) \
-  ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi)_mm512_set1_epi8(__A),
-                                              (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi) _mm512_set1_epi8(__A),
-                                              (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
-                (__mmask64) __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
-                (__mmask32) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi16 (void const *__P)
-{
-  struct __loadu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi16*)__P)->__v;
-}
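-
-/* The packed, may_alias struct above is the usual idiom for an unaligned
-   load: __m512i_u has alignment 1, and __may_alias__ exempts the access from
-   strict-aliasing assumptions, so dereferencing through the struct compiles
-   to a plain unaligned vector load. */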
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi) __W,
-                 (__mmask32) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi) _mm512_setzero_si512 (),
-                 (__mmask32) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi8 (void const *__P)
-{
-  struct __loadu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi8*)__P)->__v;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi) __W,
-                 (__mmask64) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi) _mm512_setzero_si512 (),
-                 (__mmask64) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi16 (void *__P, __m512i __A)
-{
-  struct __storeu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi16*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
-{
-  __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
-             (__v32hi) __A,
-             (__mmask32) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi8 (void *__P, __m512i __A)
-{
-  struct __storeu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi8*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
-{
-  __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
-             (__v64qi) __A,
-             (__mmask64) __U);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_test_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_test_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B),
-                                 _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                      _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
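-
-/* The test/testn intrinsics are expressed as _mm512_and_epi32 plus a
-   byte/word compare: bitwise AND is element-size agnostic, so reusing the
-   epi32 AND is harmless, and the pattern matches the VPTESTMB/VPTESTMW
-   (and VPTESTNMB/VPTESTNMW) instructions these lower to. */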
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_movepi8_mask (__m512i __A)
-{
-  return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_movepi16_mask (__m512i __A)
-{
-  return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi8 (__mmask64 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi16 (__mmask32 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastb_epi8 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastw_epi16 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
-        __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-             __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)__W);
-}
-
-#define _mm512_alignr_epi8(A, B, N) \
-  ((__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
-                                      (__v64qi)(__m512i)(B), (int)(N)))
-
-#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
-  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi8(U, A, B, N) \
-  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)_mm512_setzero_si512()))
-
-#define _mm512_dbsad_epu8(A, B, imm) \
-  ((__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
-                                       (__v64qi)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)(__m512i)(W)))
-
-#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sad_epu8 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
-               (__v64qi) __B);
-}
-
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS
-
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h
deleted file mode 100644
index df29864..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ /dev/null
@@ -1,9758 +0,0 @@
-/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512FINTRIN_H
-#define __AVX512FINTRIN_H
-
-typedef char __v64qi __attribute__((__vector_size__(64)));
-typedef short __v32hi __attribute__((__vector_size__(64)));
-typedef double __v8df __attribute__((__vector_size__(64)));
-typedef float __v16sf __attribute__((__vector_size__(64)));
-typedef long long __v8di __attribute__((__vector_size__(64)));
-typedef int __v16si __attribute__((__vector_size__(64)));
-
-/* Unsigned types */
-typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
-typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
-typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
-typedef unsigned int __v16su __attribute__((__vector_size__(64)));
-
-typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
-typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
-typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
-
-typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
-
-typedef unsigned char __mmask8;
-typedef unsigned short __mmask16;
-
-/* Rounding mode macros.  */
-#define _MM_FROUND_TO_NEAREST_INT   0x00
-#define _MM_FROUND_TO_NEG_INF       0x01
-#define _MM_FROUND_TO_POS_INF       0x02
-#define _MM_FROUND_TO_ZERO          0x03
-#define _MM_FROUND_CUR_DIRECTION    0x04
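-
-/* These macros form the rounding-control operand of the "_round_"
-   intrinsics. Illustrative use (with _MM_FROUND_NO_EXC, defined in
-   <smmintrin.h>, which must accompany an explicit rounding override):
-     _mm512_add_round_pd(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
-   _MM_FROUND_CUR_DIRECTION instead keeps the rounding mode from MXCSR. */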
-
-/* Constants for integer comparison predicates */
-typedef enum {
-    _MM_CMPINT_EQ,      /* Equal */
-    _MM_CMPINT_LT,      /* Less than */
-    _MM_CMPINT_LE,      /* Less than or Equal */
-    _MM_CMPINT_UNUSED,
-    _MM_CMPINT_NE,      /* Not Equal */
-    _MM_CMPINT_NLT,     /* Not Less than */
-#define _MM_CMPINT_GE   _MM_CMPINT_NLT  /* Greater than or Equal */
-    _MM_CMPINT_NLE      /* Not Less than or Equal */
-#define _MM_CMPINT_GT   _MM_CMPINT_NLE  /* Greater than */
-} _MM_CMPINT_ENUM;
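-
-/* Illustrative use: the predicate selects the comparison performed by the
-   integer compare intrinsics defined later in this header, e.g.
-     __mmask16 m = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_LE);
-   sets bit i of m when a[i] <= b[i]. */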
-
-typedef enum
-{
-  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
-  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
-  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
-  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
-  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
-  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
-  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
-  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
-  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
-  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
-  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
-  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
-  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
-  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
-  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
-  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
-  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
-  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
-  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
-  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
-  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
-  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
-  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
-  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
-  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
-  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
-  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
-  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
-  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
-  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
-  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
-  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
-  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
-  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
-  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
-  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
-  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
-  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
-  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
-  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
-  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
-  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
-  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
-  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
-  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
-  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
-  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
-  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
-  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
-  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
-  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
-  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
-  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
-  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
-  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
-  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
-  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
-  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
-  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
-  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
-  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
-  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
-  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
-  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
-  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
-  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
-  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
-  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
-  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
-  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
-  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
-  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
-  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
-  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
-  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
-  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
-  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
-  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
-  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
-  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
-  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
-  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
-  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
-  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
-  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
-  _MM_PERM_DDDD = 0xFF
-} _MM_PERM_ENUM;
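-
-/* Each letter selects one 32-bit element of a four-element group (A =
-   element 0 ... D = element 3), written from the highest result position to
-   the lowest. Illustrative use: _mm512_shuffle_epi32(v, _MM_PERM_DCBA) is
-   the identity permutation (0xE4), while _MM_PERM_AAAA broadcasts element 0
-   of each group. */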
-
-typedef enum
-{
-  _MM_MANT_NORM_1_2,    /* interval [1, 2)      */
-  _MM_MANT_NORM_p5_2,   /* interval [0.5, 2)    */
-  _MM_MANT_NORM_p5_1,   /* interval [0.5, 1)    */
-  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
-} _MM_MANTISSA_NORM_ENUM;
-
-typedef enum
-{
-  _MM_MANT_SIGN_src,    /* sign = sign(SRC)     */
-  _MM_MANT_SIGN_zero,   /* sign = 0             */
-  _MM_MANT_SIGN_nan   /* DEST = NaN if sign(SRC) = 1 */
-} _MM_MANTISSA_SIGN_ENUM;
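-
-/* These two enums parameterize the getmant intrinsics, e.g. (illustrative)
-     _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
-   which extracts each mantissa normalized to [1, 2), keeping the source
-   sign. */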
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
-
-/* Create vectors with repeated elements */
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_setzero_si512(void)
-{
-  return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-#define _mm512_setzero_epi32 _mm512_setzero_si512
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_undefined_pd(void)
-{
-  return (__m512d)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined_ps(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_undefined_epi32(void)
-{
-  return (__m512i)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastd_epi32 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastq_epi64 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) _mm512_setzero_si512());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_setzero_ps(void)
-{
-  return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-#define _mm512_setzero _mm512_setzero_ps
-
-static  __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_setzero_pd(void)
-{
-  return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set1_ps(float __w)
-{
-  return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
-                                 __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set1_pd(double __w)
-{
-  return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi8(char __w)
-{
-  return __extension__ (__m512i)(__v64qi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi16(short __w)
-{
-  return __extension__ (__m512i)(__v32hi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi32(int __s)
-{
-  return __extension__ (__m512i)(__v16si){
-    __s, __s, __s, __s, __s, __s, __s, __s,
-    __s, __s, __s, __s, __s, __s, __s, __s };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si)_mm512_set1_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi64(long long __d)
-{
-  return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di)_mm512_set1_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcastss_ps(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
-                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
-{
-  return __extension__ (__m512i)(__v16si)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi64 (long long __A, long long __B, long long __C,
-       long long __D)
-{
-  return __extension__ (__m512i) (__v8di)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set4_pd (double __A, double __B, double __C, double __D)
-{
-  return __extension__ (__m512d)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set4_ps (float __A, float __B, float __C, float __D)
-{
-  return __extension__ (__m512)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-#define _mm512_setr4_epi32(e0,e1,e2,e3)               \
-  _mm512_set4_epi32((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_epi64(e0,e1,e2,e3)               \
-  _mm512_set4_epi64((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_pd(e0,e1,e2,e3)                \
-  _mm512_set4_pd((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_ps(e0,e1,e2,e3)                \
-  _mm512_set4_ps((e3),(e2),(e1),(e0))
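-
-/* The set4 forms repeat one four-element pattern across the whole vector,
-   taking arguments from the highest element down; the setr4 macros above
-   simply reverse the argument order so the elements read in memory order. */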
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcastsd_pd(__m128d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-/* Cast between vector types */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0,  1,  2,  3,  4,  5,  6,  7,
-                                          -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m128d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd128(__m512d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-static __inline __m256d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd256 (__m512d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline __m128 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps128(__m512 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
-}
-
-static __inline __m256 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps256 (__m512 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castpd_ps (__m512d __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castpd_si512 (__m512d __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd128_pd512 (__m128d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castps_pd (__m512 __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castps_si512 (__m512 __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps128_ps512 (__m128 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi128_si512 (__m128i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi256_si512 (__m256i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castsi512_ps (__m512i __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castsi512_pd (__m512i __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si128 (__m512i __A)
-{
-  return (__m128i)__builtin_shufflevector(__A, __A, 0, 1);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si256 (__m512i __A)
-{
-  return (__m256i)__builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_int2mask(int __a)
-{
-  return (__mmask16)__a;
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask2int(__mmask16 __a)
-{
-  return (int)__a;
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    128-bit floating-point vector of [2 x double]. The lower 128 bits
-///    contain the value of the source vector. The upper 384 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd128_pd512(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    256-bit floating-point vector of [4 x double]. The lower 256 bits
-///    contain the value of the source vector. The upper 256 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    128-bit floating-point vector of [4 x float]. The lower 128 bits contain
-///    the value of the source vector. The upper 384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps128_ps512(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    256-bit floating-point vector of [8 x float]. The lower 256 bits contain
-///    the value of the source vector. The upper 256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit integer vector from a 128-bit integer vector.
-///    The lower 128 bits contain the value of the source vector. The upper
-///    384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The upper 384 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi128_si512(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit integer vector from a 256-bit integer vector.
-///    The lower 256 bits contain the value of the source vector. The upper
-///    256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
-///    the parameter. The upper 256 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi256_si512(__m256i __a)
-{
-  return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a & (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                (__v16si) _mm512_and_epi32(__a, __b),
-                (__v16si) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
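-
-/* For illustration, a minimal sketch of the merge- and zero-masking
-   conventions used by the masked forms throughout this file (__src, __a
-   and __b stand for arbitrary __m512i values):
-
-     __mmask16 __k = 0x0001;                         // lane 0 only
-     __m512i __m = _mm512_mask_and_epi32(__src, __k, __a, __b);
-                   // lane 0 = __a[0] & __b[0], other lanes from __src
-     __m512i __z = _mm512_maskz_and_epi32(__k, __a, __b);
-                   // lane 0 = __a[0] & __b[0], other lanes zeroed
-*/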
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-    return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
-                (__v8di) _mm512_and_epi64(__a, __b),
-                (__v8di) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_si512 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v16su)__A & (__v16su)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_andnot_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_andnot_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
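-
-/* Note the operand order in the andnot forms above:
-   _mm512_andnot_epi32(__A, __B) computes (~__A) & __B, i.e. the first
-   operand is the one that is complemented, matching the VPANDND/VPANDNQ
-   instructions. */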
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a | (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                             (__v16si)_mm512_or_epi32(__a, __b),
-                                             (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_or_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a ^ (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                            (__v16si)_mm512_xor_epi32(__a, __b),
-                                            (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_xor_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
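-
-/* The _si512 forms above compute the same bit pattern as the unmasked
-   _epi32/_epi64 forms; the element width only becomes observable in the
-   masked variants, where it sets the per-lane granularity of the mask. */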
-
-/* Arithmetic */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a + (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a + (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a * (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a * (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a - (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a - (__v16sf)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A + (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A - (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A + (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A - (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-#define _mm512_max_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_max_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)(W)))
-
-#define _mm512_maskz_max_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_max_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_max_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_max_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W)))
-
-#define _mm512_maskz_max_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_max_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_max_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_max_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_max_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_max_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_min_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_min_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)(W)))
-
-#define _mm512_maskz_min_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_min_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_min_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_min_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W)))
-
-#define _mm512_maskz_min_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_min_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_min_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_min_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_min_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_min_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epu32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
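-
-/* For illustration: _mm512_mul_epi32 and _mm512_mul_epu32 above are
-   widening multiplies. Each 64-bit result lane is the product of the low
-   32-bit element of the corresponding source lane (the odd 32-bit
-   elements are ignored), sign- or zero-extended respectively. A minimal
-   sketch with arbitrary values:
-
-     __m512i __x = _mm512_set1_epi64(0x00000002FFFFFFFFLL); // low 32 bits: -1
-     __m512i __y = _mm512_set1_epi64(0x0000000000000003LL); // low 32 bits:  3
-     __m512i __s = _mm512_mul_epi32(__x, __y); // each lane: -3
-     __m512i __u = _mm512_mul_epu32(__x, __y); // each lane: 0xFFFFFFFF * 3
-*/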
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A * (__v16su) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v8du) __A * (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_mullox_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-#define _mm512_sqrt_round_pd(A, R) \
-  ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))
-
-#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_sqrt_round_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)_mm512_setzero_pd()))
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_sqrt_pd(__m512d __A)
-{
-  return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_sqrt_round_ps(A, R) \
-  ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))
-
-#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_sqrt_round_ps(U, A, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)_mm512_setzero_ps()))
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_sqrt_ps(__m512 __A)
-{
-  return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                 (__v8df)
-                 _mm512_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-             (__v4sf) __B,
-             (__v4sf)
-             _mm_setzero_ps (),
-             (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-              (__v2df) __B,
-              (__v2df)
-              _mm_setzero_pd (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
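-
-/* The rsqrt14 family above returns reciprocal square-root approximations
-   with a maximum relative error of 2^-14. Where more precision is needed,
-   one Newton-Raphson step is the usual refinement; a sketch, assuming __a
-   holds non-negative floats:
-
-     __m512 __r = _mm512_rsqrt14_ps(__a);                // ~14-bit estimate
-     __m512 __h = _mm512_mul_ps(_mm512_set1_ps(0.5f), __a);
-     __r = _mm512_mul_ps(__r,                            // r*(1.5 - 0.5*a*r*r)
-               _mm512_sub_ps(_mm512_set1_ps(1.5f),
-                             _mm512_mul_ps(__h, _mm512_mul_ps(__r, __r))));
-*/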
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rcp14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-               (__v8df)
-               _mm512_setzero_pd (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rcp14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-              (__v16sf)
-              _mm512_setzero_ps (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf)
-                   _mm512_setzero_ps (),
-                   (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-                 (__v4sf) __B,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-            (__v2df) __B,
-            (__v2df)
-            _mm_setzero_pd (),
-            (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_floor_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_FLOOR,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_floor_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_FLOOR,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_CEIL,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_ceil_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_ceil_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_CEIL,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi64(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi32(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
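-
-/* Note on the scalar masked forms above: only bit 0 of the mask is
-   consulted and only element 0 is computed; elements 1..3 of the result
-   are copied from __A, as with the underlying scalar instruction. */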
-
-#define _mm_add_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_add_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_add_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_add_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_add_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_add_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_add_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_add_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_add_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_add_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_add_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_add_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
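-
-/* For illustration, a minimal sketch of the explicit-rounding forms (the
-   rounding argument must be a compile-time constant; __a and __b stand
-   for arbitrary __m512 values):
-
-     __m512 __sum = _mm512_add_round_ps(
-         __a, __b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
-*/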
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_sub_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_sub_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_sub_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_sub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_sub_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_sub_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_sub_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_sub_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_sub_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_mul_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_mul_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_mul_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_mul_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_mul_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_mul_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_mul_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_mul_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_mul_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_mul_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_div_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_div_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm_maskz_div_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_div_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_div_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_div_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a/(__v8df)__b);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a/(__v16sf)__b);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_div_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(R)))
-
-#define _mm512_mask_div_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_div_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_div_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(R)))
-
-#define _mm512_mask_div_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_div_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_roundscale_ps(A, B) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_ps(A, B, C, imm) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_ps(A, B, imm) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(A), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         (int)(R)))
-
-#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(A), (int)(R)))
-
-#define _mm512_roundscale_round_ps(A, imm, R) \
-  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R)))
-
-#define _mm512_roundscale_pd(A, B) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_pd(A, B, C, imm) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_roundscale_pd(A, B, imm) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(A), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          (int)(R)))
-
-#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(A), (int)(R)))
-
-#define _mm512_roundscale_round_pd(A, imm, R) \
-  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm512_fmadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fmsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             -(__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fnmadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fnmsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             -(__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fmsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            -(__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fnmadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fnmsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            -(__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_fmsubadd_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                -(__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) -1,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                        (__v8df) __B,
-                                                        -(__v8df) __C,
-                                                        (__mmask8) __U,
-                                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_fmsubadd_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)-1, (int)(R)))
-
-
-#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               -(__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       -(__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
-                                                (__v8df)(__m512d)(B), \
-                                                (__v8df)(__m512d)(C), \
-                                                (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
-                                               (__v16sf)(__m512)(B), \
-                                               (__v16sf)(__m512)(C), \
-                                               (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            -(__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
-  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                            -(__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R)))
-
-
-#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
-  ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8df)(__m512d)(C), \
-                                             (__mmask8)(U), (int)(R)))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
-  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                           -(__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R)))
-
-
-#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
-  ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16sf)(__m512)(C), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
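-
-/* Illustrative sketch (variable names a, k, b, c are hypothetical): the
-   fnmadd/fnmsub forms above reuse the plain FMA builtins and express the
-   negations by flipping operand signs, e.g. fnmsub computes -(A * B) - C and
-   is emitted as fmadd(A, -B, -C):
-
-     __m512 r = _mm512_mask_fnmsub_ps(a, k, b, c);
-     // lane i: (k >> i) & 1 ? -(a[i] * b[i]) - c[i] : a[i]
-*/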
-
-
-/* Vector permutations */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I,
-                                                (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I,
-                                                (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)_mm512_setzero_si512());
-}
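-
-/* Usage sketch (illustrative; v and idx are hypothetical): permutex2var
-   treats __A and __B as one 32-entry table; the low 5 bits of each lane of
-   __I select the element, with bit 4 choosing __B over __A. Reversing a
-   vector against itself:
-
-     __m512i idx = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
-                                    8, 9, 10, 11, 12, 13, 14, 15);
-     __m512i rev = _mm512_permutex2var_epi32(v, idx, v);
-*/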
-
-#define _mm512_alignr_epi64(A, B, I) \
-  ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(I)))
-
-#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_alignr_epi32(A, B, I) \
-  ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(I)))
-
-#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512()))
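-
-/* Usage sketch (illustrative): alignr concatenates A (high) with B (low),
-   shifts the pair right by I elements, and keeps the low half, so
-   _mm512_alignr_epi32(A, B, 1) yields { B[1..15], A[0] }. */
-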
-/* Vector Extract */
-
-#define _mm512_extractf64x4_pd(A, I) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                             (__v4df)_mm256_undefined_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__v4df)(__m256d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
-  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_extractf32x4_ps(A, I) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v4sf)_mm_undefined_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                            (__v4sf)(__m128)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U)))
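-
-/* Usage sketch (illustrative; v is hypothetical): the immediate selects which
-   128-bit (extractf32x4) or 256-bit (extractf64x4) lane is extracted:
-
-     __m128 hi = _mm512_extractf32x4_ps(v, 3);   // elements 12..15 of v
-*/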
-
-/* Vector Blend */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-                 (__v8df) __W,
-                 (__v8df) __A);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-                (__v16sf) __W,
-                (__v16sf) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                (__v8di) __W,
-                (__v8di) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                (__v16si) __W,
-                (__v16si) __A);
-}
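-
-/* Usage sketch (illustrative; a and w are hypothetical): blend selects from
-   __W where the mask bit is set and from __A otherwise, i.e. per-lane
-   select(U, W, A):
-
-     __m512d r = _mm512_mask_blend_pd((__mmask8)0x0F, a, w);  // lanes 0..3 from w
-*/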
-
-/* Compare */
-
-#define _mm512_cmp_round_ps_mask(A, B, P, R) \
-  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), (int)(P), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
-  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), (int)(P), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_cmp_ps_mask(A, B, P) \
-  _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
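-
-/* Usage sketch (illustrative; a and b are hypothetical): comparisons return a
-   bitmask rather than a vector, so the result feeds straight into masked ops:
-
-     __mmask16 k = _mm512_cmplt_ps_mask(a, b);  // bit i set iff a[i] < b[i]
-     __m512 m = _mm512_mask_blend_ps(k, b, a);  // per-lane min(a, b)
-*/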
-
-#define _mm512_cmp_round_pd_mask(A, B, P, R) \
-  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(P), \
-                                          (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
-  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(P), \
-                                          (__mmask8)(U), (int)(R)))
-
-#define _mm512_cmp_pd_mask(A, B, P) \
-  _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q)
-
-/* Conversion */
-
-#define _mm512_cvtt_roundps_epu32(A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)_mm512_undefined_epi32(), \
-                                              (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)(__m512i)(W), \
-                                              (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                              (__v16si)_mm512_setzero_si512(), \
-                                              (__mmask16)(U), (int)(R)))
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epu32(__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) __W,
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) _mm512_setzero_si512 (),
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
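-
-/* Note (illustrative; x is hypothetical): the cvtt* forms truncate toward
-   zero, while the cvt* forms below honour the current rounding mode or an
-   explicit _MM_FROUND_* immediate in the *_round_* variants:
-
-     __m512i t = _mm512_cvttps_epu32(x);                      // truncate
-     __m512i n = _mm512_cvt_roundps_epu32(x, _MM_FROUND_TO_NEAREST_INT |
-                                             _MM_FROUND_NO_EXC);
-*/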
-
-#define _mm512_cvt_roundepi32_ps(A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
-  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_cvt_roundepu32_ps(A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
-  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-#define _mm512_cvt_roundpd_ps(A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                           (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
-  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_ps (__m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_undefined_ps (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_pslo (__m512d __A)
-{
-  return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512) __builtin_shufflevector (
-                (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W),
-                                               __U, __A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
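-
-/* Note (illustrative; d is hypothetical): the *_pslo forms convert eight
-   doubles into the low eight floats of a __m512 and zero the upper eight,
-   mirroring the cvtepi32lo/cvtepu32lo helpers above:
-
-     __m512 lo = _mm512_cvtpd_pslo(d);   // lo[8..15] == 0.0f
-*/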
-
-#define _mm512_cvt_roundps_ph(A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)_mm256_undefined_si256(), \
-                                             (__mmask16)-1))
-
-#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)(__m256i)(U), \
-                                             (__mmask16)(W)))
-
-#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                             (__v16hi)_mm256_setzero_si256(), \
-                                             (__mmask16)(W)))
-
-#define _mm512_cvtps_ph       _mm512_cvt_roundps_ph
-#define _mm512_mask_cvtps_ph  _mm512_mask_cvt_roundps_ph
-#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
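-
-/* Usage sketch (illustrative; v is hypothetical): the second argument is a
-   rounding immediate for the float-to-half conversion:
-
-     __m256i h = _mm512_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT);
-*/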
-
-#define _mm512_cvt_roundph_ps(A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
-  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtph_ps(__m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) _mm512_setzero_ps (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundpd_epi32(A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epi32(__m512d __a)
-{
-  return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
-                                                   (__v8si)_mm256_setzero_si256(),
-                                                   (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundps_epi32(A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R)))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epi32(__m512 __a)
-{
-  return (__m512i)
-    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
-                                     (__v16si) _mm512_setzero_si512 (),
-                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epi32(A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epi32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) _mm512_undefined_epi32 (),
-                 (__mmask16) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si)
-                 _mm512_setzero_si512 (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epi32(A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epi32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_undefined_si256 (),
-                 (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epu32(A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
-  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epu32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_undefined_epi32 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epu32(A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_cvtsd_f64(__m512d __a)
-{
-  return __a[0];
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_cvtss_f32(__m512 __a)
-{
-  return __a[0];
-}
-
-/* Unpack and Interleave */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
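-
-/* Note (illustrative): the shuffle indices above interleave within each
-   128-bit lane, AVX-style, not across the full register:
-
-     _mm512_unpacklo_pd(a, b) == { a[0],b[0], a[2],b[2], a[4],b[4], a[6],b[6] }
-*/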
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         2,    18,    3,    19,
-                                         2+4,  18+4,  3+4,  19+4,
-                                         2+8,  18+8,  3+8,  19+8,
-                                         2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         0,    16,    1,    17,
-                                         0+4,  16+4,  1+4,  17+4,
-                                         0+8,  16+8,  1+8,  17+8,
-                                         0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          2,    18,    3,    19,
-                                          2+4,  18+4,  3+4,  19+4,
-                                          2+8,  18+8,  3+8,  19+8,
-                                          2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          0,    16,    1,    17,
-                                          0+4,  16+4,  1+4,  17+4,
-                                          0+8,  16+8,  1+8,  17+8,
-                                          0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-
-/* SIMD load ops */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_si512 (void const *__P)
-{
-  struct __loadu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si512*)__P)->__v;
-}
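-
-/* Note: the single-field wrapper struct is __packed__, so the access carries
-   no alignment requirement, and __may_alias__, so reading through it does not
-   violate strict aliasing -- together they express an unaligned load without
-   falling back to __builtin_memcpy. */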
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
-                                                     (__v16si)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
-                                                     (__v8di)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_loadu_pd(void const *__p)
-{
-  struct __loadu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_loadu_ps(void const *__p)
-{
-  struct __loadu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_load_ps(void const *__p)
-{
-  return *(const __m512*)__p;
-}
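-
-/* Usage sketch (illustrative; buf is hypothetical): unlike the loadu forms,
-   the plain load/store forms assume 64-byte alignment of the pointer:
-
-     _Alignas(64) float buf[16];
-     __m512 v = _mm512_load_ps(buf);
-*/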
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_load_pd(void const *__p)
-{
-  return *(const __m512d*)__p;
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
-                          (__v8df) __W,
-                          (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_si512 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi32 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi64 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-/* SIMD store ops */
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi64 (void *__P, __m512i __A)
-{
-  struct __storeu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
-                                     (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_si512 (void *__P, __m512i __A)
-{
-  struct __storeu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si512*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi32 (void *__P, __m512i __A)
-{
-  struct __storeu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
-                                     (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_pd(void *__P, __m512d __A)
-{
-  struct __storeu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_ps(void *__P, __m512 __A)
-{
-  struct __storeu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_pd(void *__P, __m512d __A)
-{
-  *(__m512d*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_ps(void *__P, __m512 __A)
-{
-  *(__m512*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_si512 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi32 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi64 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-/* Mask ops */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS
-_mm512_knot(__mmask16 __M)
-{
-  return __builtin_ia32_knothi(__M);
-}
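-
-/* Note (illustrative): masks are plain integers, so knot complements all 16
-   bits, e.g. _mm512_knot(0x00FF) == 0xFF00. */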
-
-/* Integer compare */
-
-#define _mm512_cmpeq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
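-
-/* Note (illustrative; a and b are hypothetical): each alias expands to
-   _mm512_cmp_epi32_mask (or the epu32/epi64/epu64 sibling below) with the
-   matching _MM_CMPINT_* predicate:
-
-     __mmask16 k = _mm512_cmpgt_epi32_mask(a, b);
-     // == _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_GT)
-*/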
-
-#define _mm512_cmpeq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi32(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi64(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi32(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-
-
-#define _mm512_cmp_epi32_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)-1))
-
-#define _mm512_cmp_epu32_mask(a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                           (__v16si)(__m512i)(b), (int)(p), \
-                                           (__mmask16)-1))
-
-#define _mm512_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm512_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                          (__v8di)(__m512i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)(m)))
-
-#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                           (__v16si)(__m512i)(b), (int)(p), \
-                                           (__mmask16)(m)))
-
-#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                          (__v8di)(__m512i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm512_rol_epi32(a, b) \
-  ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)))
-
-#define _mm512_mask_rol_epi32(W, U, a, b) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_rol_epi32((a), (b)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_rol_epi32(U, a, b) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_rol_epi32((a), (b)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_rol_epi64(a, b) \
-  ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)))
-
-#define _mm512_mask_rol_epi64(W, U, a, b) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_rol_epi64((a), (b)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_rol_epi64(U, a, b) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_rol_epi64((a), (b)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ror_epi32(A, B) \
-  ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)))
-
-#define _mm512_mask_ror_epi32(W, U, A, B) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_ror_epi32((A), (B)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_ror_epi32(U, A, B) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_ror_epi32((A), (B)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_ror_epi64(A, B) \
-  ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)))
-
-#define _mm512_mask_ror_epi64(W, U, A, B) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_ror_epi64((A), (B)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_ror_epi64(U, A, B) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_ror_epi64((A), (B)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
-                        unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
-          (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_movedup_pd (__m512d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
-                                          0, 0, 2, 2, 4, 4, 6, 6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_fixupimm_pd(A, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)-1, \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8di)(__m512i)(C), \
-                                               (int)(imm), (__mmask8)(U), \
-                                               (int)(R)))
-
-#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8di)(__m512i)(C), \
-                                               (int)(imm), (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U), (int)(R)))
-
-#define _mm512_fixupimm_ps(A, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1, \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
-  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U), \
-                                              (int)(R)))
-
-#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U), \
-                                              _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm_fixupimm_sd(A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)-1, \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
-  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R)))
-
-#define _mm_fixupimm_ss(A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
-  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4si)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4si)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_getexp_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)_mm_setzero_pd(), \
-                                                  (__mmask8)-1, (int)(R)))
-
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)(__m128d)(W), \
-                                                  (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                  (__v2df)(__m128d)(B), \
-                                                  (__v2df)_mm_setzero_pd(), \
-                                                  (__mmask8)(U), (int)(R)))
-
-#define _mm_getexp_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)_mm_setzero_ps(), \
-                                                 (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B, (__v4sf)  _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)(__m128)(W), \
-                                                 (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                 (__v4sf)(__m128)(B), \
-                                                 (__v4sf)_mm_setzero_ps(), \
-                                                 (__mmask8)(U), (int)(R)))
-
-#define _mm_getmant_round_sd(A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(R)))
-
-#define _mm_getmant_sd(A, B, C, D)  \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_getmant_sd(U, A, B, C, D) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
-  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (int)(((D)<<2) | (C)), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(R)))
-
-#define _mm_getmant_round_ss(A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(R)))
-
-#define _mm_getmant_ss(A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_getmant_ss(U, A, B, C, D) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
-  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kmov (__mmask16 __A)
-{
-  return  __A;
-}
-
-#define _mm_comi_round_sd(A, B, P, R) \
-  ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
-                               (int)(P), (int)(R)))
-
-#define _mm_comi_round_ss(A, B, P, R) \
-  ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
-                               (int)(P), (int)(R)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_si64(A, R) \
-  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sll_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sll_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)-1))
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), (int)(imm), \
-                                             (__mmask16)(U)))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                              (__v16si)(__m512i)(B), \
-                                              (__v16si)(__m512i)(C), \
-                                              (int)(imm), (__mmask16)(U)))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                              (__v8di)(__m512i)(B), \
-                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_i64(A, R) \
-  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
-#endif
-
-#define _mm_cvt_roundsd_si32(A, R) \
-  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvt_roundsd_i32(A, R) \
-  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvt_roundsd_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
-                                                   (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
-                 __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvt_roundss_si32(A, R) \
-  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvt_roundss_i32(A, R) \
-  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_si64(A, R) \
-  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvt_roundss_i64(A, R) \
-  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
-#endif
-
-#define _mm_cvt_roundss_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
-                                                   (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
-                 __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_i32(A, R) \
-  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvtt_roundsd_si32(A, R) \
-  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i32 (__m128d __A)
-{
-  return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_si64(A, R) \
-  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
-
-#define _mm_cvtt_roundsd_i64(A, R) \
-  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i64 (__m128d __A)
-{
-  return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
-                                                    (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
-                  __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_i32(A, R) \
-  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvtt_roundss_si32(A, R) \
-  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttss_i32 (__m128 __A)
-{
-  return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_i64(A, R) \
-  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-#define _mm_cvtt_roundss_si64(A, R) \
-  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_i64 (__m128 __A)
-{
-  return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_u32(A, R) \
-  ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_u64(A, R) \
-  ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
-                                                    (int)(R)))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
-                  __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm512_permute_pd(X, C) \
-  ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)))
-
-#define _mm512_mask_permute_pd(W, U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permute_pd((X), (C)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_permute_pd(U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permute_pd((X), (C)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_permute_ps(X, C) \
-  ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)))
-
-#define _mm512_mask_permute_ps(W, U, X, C) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_permute_ps((X), (C)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_permute_ps(U, X, C) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_permute_ps((X), (C)), \
-                                       (__v16sf)_mm512_setzero_ps()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutevar_pd(__m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutevar_ps(__m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I,
-                                                 (__v8df)__B);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)(__m512d)__I);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I,
-                                                (__v16sf) __B);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)(__m512)__I);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)_mm512_setzero_ps());
-}
-
-
-#define _mm512_cvtt_roundpd_epu32(A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)_mm256_undefined_si256(), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)(__m256i)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
-  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                              (__v8si)_mm256_setzero_si256(), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_roundscale_round_sd(A, B, imm, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(imm), \
-                                                 (int)(R)))
-
-#define _mm_roundscale_sd(A, B, imm) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(imm), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(imm), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 (int)(R)))
-
-#define _mm_maskz_roundscale_sd(U, A, B, I) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
-  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(I), \
-                                                 (int)(R)))
-
-#define _mm_roundscale_round_ss(A, B, imm, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                (int)(R)))
-
-#define _mm_roundscale_ss(A, B, imm) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_ss(W, U, A, B, I) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R)))
-
-#define _mm_maskz_roundscale_ss(U, A, B, I) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
-  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R)))
-
-#define _mm512_scalef_round_pd(A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
-  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_scalef_pd (__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_scalef_round_ps(A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
-  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_scalef_ps (__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_scalef_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-              (__v2df)( __B), (__v2df) _mm_setzero_pd(),
-              (__mmask8) -1,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_scalef_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-             (__v4sf)( __B), (__v4sf) _mm_setzero_ps(),
-             (__mmask8) -1,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), \
-                                              (int)(R)))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
-                       unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
-                        unsigned int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_shuffle_f32x4(A, B, imm) \
-  ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_shuffle_f64x2(A, B, imm) \
-  ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_shuffle_i32x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
-                                      (__v16si)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_shuffle_i64x2(A, B, imm) \
-  ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
-                                      (__v8di)(__m512i)(B), (int)(imm)))
-
-#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_shuffle_pd(A, B, M) \
-  ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
-                                     (__v8df)(__m512d)(B), (int)(M)))
-
-#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_shuffle_pd(U, A, B, M) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_shuffle_ps(A, B, M) \
-  ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
-                                    (__v16sf)(__m512)(B), (int)(M)))
-
-#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                       (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_shuffle_ps(U, A, B, M) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                       (__v16sf)_mm512_setzero_ps()))
-
-#define _mm_sqrt_round_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)(__m128d)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm_sqrt_round_ss(A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)-1, (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                            (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x4(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)__O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x4(__m256d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)__O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x4(__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x4(__m256i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) __O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) _mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) __O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) _mm512_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_undefined_si256 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_undefined_si256 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_undefined_si256 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_undefined_si256 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_undefined_si256 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_undefined_si256 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-#define _mm512_extracti32x4_epi32(A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)_mm_undefined_si128(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)(__m128i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                             (__v4si)_mm_setzero_si128(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_extracti64x4_epi64(A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)_mm256_undefined_si256(), \
-                                             (__mmask8)-1))
-
-#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)(__m256i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
-  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                             (__v4di)_mm256_setzero_si256(), \
-                                             (__mmask8)(U)))
-
-#define _mm512_insertf64x4(A, B, imm) \
-  ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
-                                       (__v4df)(__m256d)(B), (int)(imm)))
-
-#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                   (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_insertf64x4(U, A, B, imm) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                   (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_inserti64x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
-                                       (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                   (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_inserti64x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                   (__v8di)_mm512_setzero_si512()))
-
-#define _mm512_insertf32x4(A, B, imm) \
-  ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
-                                      (__v4sf)(__m128)(B), (int)(imm)))
-
-#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                  (__v16sf)(__m512)(W)))
-
-#define _mm512_maskz_insertf32x4(U, A, B, imm) \
-  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                  (__v16sf)_mm512_setzero_ps()))
-
-#define _mm512_inserti32x4(A, B, imm) \
-  ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
-                                       (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                  (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_inserti32x4(U, A, B, imm) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                  (__v16si)_mm512_setzero_si512()))
-
-#define _mm512_getmant_round_pd(A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_undefined_pd(), \
-                                             (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)(__m512d)(W), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)(U), (int)(R)))
-
-#define _mm512_getmant_pd(A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)-1, \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_pd(W, U, A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)(__m512d)(W), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_getmant_pd(U, A, B, C) \
-  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v8df)_mm512_setzero_pd(), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_getmant_round_ps(A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), (int)(R)))
-
-#define _mm512_getmant_ps(A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)_mm512_undefined_ps(), \
-                                            (__mmask16)-1, \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_mask_getmant_ps(W, U, A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)(__m512)(W), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_maskz_getmant_ps(U, A, B, C) \
-  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                            (int)(((C)<<2)|(B)), \
-                                            (__v16sf)_mm512_setzero_ps(), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION))
-
-#define _mm512_getexp_round_pd(A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_getexp_round_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_getexp_round_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_getexp_pd (__m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_getexp_round_ps(A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_getexp_round_ps(W, U, A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R)))
-
-#define _mm512_maskz_getexp_round_ps(U, A, R) \
-  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R)))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_getexp_ps (__m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_i64gather_ps(index, addr, scale) \
-  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
-  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
-                                         (void const *)(addr), \
-                                         (__v8di)(__m512i)(index), \
-                                         (__mmask8)-1, (int)(scale)))
-
-#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v8di)(__m512i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_pd(index, addr, scale) \
-  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64gather_epi64(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i32gather_ps(index, addr, scale) \
-  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512)(index), \
-                                        (__mmask16)-1, (int)(scale)))
-
-#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512)(index), \
-                                        (__mmask16)(mask), (int)(scale)))
-
-#define _mm512_i32gather_epi32(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
-                                         (void const *)(addr), \
-                                         (__v16si)(__m512i)(index), \
-                                         (__mmask16)-1, (int)(scale)))
-
-#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v16si)(__m512i)(index), \
-                                         (__mmask16)(mask), (int)(scale)))
-
-#define _mm512_i32gather_pd(index, addr, scale) \
-  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i32gather_epi64(index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                        (int)(scale)))
-
-#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm512_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                          (__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                          (__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          -(__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                          -(__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_ss(A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                         (int)(R)))
-
-#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                         (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
-  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                          -(__v4sf)(__m128)(B), \
-                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
-  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                          -(__v4sf)(__m128)(X), \
-                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                           (__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           -(__v2df)(__m128d)(C), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                           (__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), \
-                                           (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           -(__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                           -(__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_sd(A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                          (int)(R)))
-
-#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                          (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
-  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                           -(__v2df)(__m128d)(B), \
-                                           -(__v2df)(__m128d)(C), \
-                                           (__mmask8)(U), \
-                                           (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
-  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                           -(__v2df)(__m128d)(X), \
-                                           (__v2df)(__m128d)(Y), \
-                                           (__mmask8)(U), (int)(R)))
-
-#define _mm512_permutex_pd(X, C) \
-  ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)))
-
-#define _mm512_mask_permutex_pd(W, U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permutex_pd((X), (C)), \
-                                        (__v8df)(__m512d)(W)))
-
-#define _mm512_maskz_permutex_pd(U, X, C) \
-  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                        (__v8df)_mm512_permutex_pd((X), (C)), \
-                                        (__v8df)_mm512_setzero_pd()))
-
-#define _mm512_permutex_epi64(X, C) \
-  ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)))
-
-#define _mm512_mask_permutex_epi64(W, U, X, C) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                       (__v8di)(__m512i)(W)))
-
-#define _mm512_maskz_permutex_epi64(U, X, C) \
-  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                       (__v8di)_mm512_setzero_si512()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X);
-}
-
-#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)__W);
-}
-
-#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kand (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kandn (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestc (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestz (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxnor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-#define _kand_mask16 _mm512_kand
-#define _kandn_mask16 _mm512_kandn
-#define _knot_mask16 _mm512_knot
-#define _kor_mask16 _mm512_kor
-#define _kxnor_mask16 _mm512_kxnor
-#define _kxor_mask16 _mm512_kxor
-
-#define _kshiftli_mask16(A, I) \
-  ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)))
-
-#define _kshiftri_mask16(A, I) \
-  ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask16_u32(__mmask16 __A) {
-  return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_cvtu32_mask16(unsigned int __A) {
-  return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_load_mask16(__mmask16 *__A) {
-  return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask16(__mmask16 *__A, __mmask16 __B) {
-  *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_si512 (void * __P, __m512i __A)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_stream_load_si512 (void const *__P)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_pd (void *__P, __m512d __A)
-{
-  typedef __v8df __v8df_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_ps (void *__P, __m512 __A)
-{
-  typedef __v16sf __v16sf_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di)
-                  _mm512_setzero_si512 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U);
-}
-
-#define _mm_cmp_round_ss_mask(X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)(M), (int)(R)))
-
-#define _mm_cmp_ss_mask(X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)-1, \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                       (__v4sf)(__m128)(Y), (int)(P), \
-                                       (__mmask8)(M), \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_cmp_round_sd_mask(X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)(M), (int)(R)))
-
-#define _mm_cmp_sd_mask(X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)-1, \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
-  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                       (__v2df)(__m128d)(Y), (int)(P), \
-                                       (__mmask8)(M), \
-                                       _MM_FROUND_CUR_DIRECTION))
-
-/* Bit Test */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_test_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_test_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_movehdup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_moveldup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B),
-                                     _mm_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B),
-                                     _mm_setzero_pd());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
-{
-  __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
-                                                (__v4sf)_mm_setzero_ps(),
-                                                0, 4, 4, 4);
-
-  return (__m128) __builtin_ia32_loadss128_mask ((const __v4sf *) __A, src, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ss (__mmask8 __U, const float* __A)
-{
-  return (__m128)__builtin_ia32_loadss128_mask ((const __v4sf *) __A,
-                                                (__v4sf) _mm_setzero_ps(),
-                                                __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
-{
-  __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W,
-                                                 (__v2df)_mm_setzero_pd(),
-                                                 0, 2);
-
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, src, __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_sd (__mmask8 __U, const double* __A)
-{
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A,
-                                                  (__v2df) _mm_setzero_pd(),
-                                                  __U & 1);
-}
-
-#define _mm512_shuffle_epi32(A, I) \
-  ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)))
-
-#define _mm512_mask_shuffle_epi32(W, U, A, I) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                       (__v16si)(__m512i)(W)))
-
-#define _mm512_maskz_shuffle_epi32(U, A, I) \
-  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                       (__v16si)_mm512_setzero_si512()))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) _mm512_setzero_si512 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) _mm512_setzero_pd(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) _mm512_setzero_si512(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) _mm512_setzero_ps(),
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) _mm512_setzero_si512(),
-              (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps(),
-               (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) __W,
-                (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) _mm512_setzero_si512(),
-                (__mmask16) __U);
-}
-
-#define _mm512_cvt_roundps_pd(A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R)))
-
-#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
-  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R)))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtps_pd (__m256 __A)
-{
-  return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtpslo_pd (__m512 __A)
-{
-  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
-{
-  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) __W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) _mm512_setzero_pd ());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) __W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) _mm512_setzero_ps ());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
-            (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
-            (__mmask16) __U);
-}
-
-#define _mm_cvt_roundsd_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)_mm_undefined_ps(), \
-                                              (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
-  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)__W,
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)_mm_setzero_ps(),
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti32_ss _mm_cvtsi32_ss
-#ifdef __x86_64__
-#define _mm_cvtss_i64 _mm_cvtss_si64
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
-#define _mm_cvti64_sd _mm_cvtsi64_sd
-#define _mm_cvti64_ss _mm_cvtsi64_ss
-#endif
-
-#ifdef __x86_64__
-#define _mm_cvt_roundi64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                      (int)(R)))
-
-#define _mm_cvt_roundsi64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                      (int)(R)))
-#endif
-
-#define _mm_cvt_roundsi32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
-
-#define _mm_cvt_roundi32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsi64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                     (int)(R)))
-
-#define _mm_cvt_roundi64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                     (int)(R)))
-#endif
-
-#define _mm_cvt_roundss_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)_mm_undefined_pd(), \
-                                               (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R)))
-
-#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
-  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)__W,
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)_mm_setzero_pd(),
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu32_sd (__m128d __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_sd(A, B, R) \
-  ((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
-                                       (unsigned long long)(B), (int)(R)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvt_roundu32_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
-                                      (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu32_ss (__m128 __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_ss(A, B, R) \
-  ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
-                                      (unsigned long long)(B), (int)(R)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512(__M,
-                                              (__v16si) _mm512_set1_epi32(__A),
-                                              (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512(__M,
-                                              (__v8di) _mm512_set1_epi64(__A),
-                                              (__v8di) __O);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
-    char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
-    char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
-    char __e46, char __e45, char __e44, char __e43, char __e42, char __e41,
-    char __e40, char __e39, char __e38, char __e37, char __e36, char __e35,
-    char __e34, char __e33, char __e32, char __e31, char __e30, char __e29,
-    char __e28, char __e27, char __e26, char __e25, char __e24, char __e23,
-    char __e22, char __e21, char __e20, char __e19, char __e18, char __e17,
-    char __e16, char __e15, char __e14, char __e13, char __e12, char __e11,
-    char __e10, char __e9, char __e8, char __e7, char __e6, char __e5,
-    char __e4, char __e3, char __e2, char __e1, char __e0) {
-
-  return __extension__ (__m512i)(__v64qi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31,
-     __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39,
-     __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47,
-     __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55,
-     __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
-    short __e27, short __e26, short __e25, short __e24, short __e23,
-    short __e22, short __e21, short __e20, short __e19, short __e18,
-    short __e17, short __e16, short __e15, short __e14, short __e13,
-    short __e12, short __e11, short __e10, short __e9, short __e8,
-    short __e7, short __e6, short __e5, short __e4, short __e3,
-    short __e2, short __e1, short __e0) {
-  return __extension__ (__m512i)(__v32hi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi32 (int __A, int __B, int __C, int __D,
-     int __E, int __F, int __G, int __H,
-     int __I, int __J, int __K, int __L,
-     int __M, int __N, int __O, int __P)
-{
-  return __extension__ (__m512i)(__v16si)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7,           \
-       e8,e9,e10,e11,e12,e13,e14,e15)          \
-  _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
-                   (e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi64 (long long __A, long long __B, long long __C,
-     long long __D, long long __E, long long __F,
-     long long __G, long long __H)
-{
-  return __extension__ (__m512i) (__v8di)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7)           \
-  _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_set_pd (double __A, double __B, double __C, double __D,
-        double __E, double __F, double __G, double __H)
-{
-  return __extension__ (__m512d)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7)              \
-  _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_set_ps (float __A, float __B, float __C, float __D,
-        float __E, float __F, float __G, float __H,
-        float __I, float __J, float __K, float __L,
-        float __M, float __N, float __O, float __P)
-{
-  return __extension__ (__m512)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
-  _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
-                (e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_abs_ps(__m512 __A)
-{
-  return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
-{
-  return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_abs_pd(__m512d __A)
-{
-  return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
-{
-  return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
-
-/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars
- * as outputs. This class of vector operation forms the basis of many
- * scientific computations. In vector-reduction arithmetic, the evaluation
- * order is independent of the order of the input elements of V.
- *
- * For floating-point intrinsics:
- * 1. When using fadd/fmul intrinsics, the order of operations within the
- *    vector is unspecified (associative math).
- * 2. When using fmin/fmax intrinsics, NaN or -0.0 elements within the vector
- *    produce unspecified results.
- *
- * These intrinsics use a bisection method: at each step, the vector produced
- * by the previous step is partitioned in half, and the operation is applied
- * to the two halves. This takes log2(n) steps, where n is the number of
- * elements in the vector.
- */
-
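-/* Editor's note: a minimal scalar sketch of the bisection reduction described
- * above; the function name and plain-array signature are illustrative, not
- * part of this header. Each step folds the upper half of the working buffer
- * onto the lower half, so 8 elements take log2(8) = 3 steps. */
-static __inline__ double reduce_add_bisect8_sketch(const double *__v) {
-  double __t[8];
-  for (int __i = 0; __i < 8; ++__i)
-    __t[__i] = __v[__i];
-  for (int __n = 8; __n > 1; __n /= 2)   /* halve: 8 -> 4 -> 2 -> 1 */
-    for (int __i = 0; __i < __n / 2; ++__i)
-      __t[__i] += __t[__i + __n / 2];    /* combine the two halves */
-  return __t[0];
-}
-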
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_add_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_mul_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_add_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  return __builtin_ia32_reduce_mul_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
-}
-
-// -0.0 is used to ignore the start value since it is the neutral value of
-// floating-point addition. For more information, please refer to
-// https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fadd-intrinsic
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
-  return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
-}
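-
-/* Editor's note: a hedged usage sketch (the function name is illustrative).
- * Summing eight 1.0 elements yields exactly 8.0; the -0.0 start value is the
- * additive identity, so it does not perturb the result. */
-static __inline__ double __DEFAULT_FN_ATTRS512
-reduce_add_pd_example(void) {
-  return _mm512_reduce_add_pd(_mm512_set1_pd(1.0)); /* == 8.0 */
-}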
-
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
-  return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_maskz_mov_pd(__M, __W);
-  return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
-  return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_add_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  return __builtin_ia32_reduce_mul_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_ps(__m512 __W) {
-  return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_ps(__m512 __W) {
-  return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_maskz_mov_ps(__M, __W);
-  return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
-  return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_pd(__m512d __V) {
-  return __builtin_ia32_reduce_fmax_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_pd(__m512d __V) {
-  return __builtin_ia32_reduce_fmin_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
-  return __builtin_ia32_reduce_fmax_pd512(__V);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
-  return __builtin_ia32_reduce_fmin_pd512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_ps(__m512 __V) {
-  return __builtin_ia32_reduce_fmax_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_ps(__m512 __V) {
-  return __builtin_ia32_reduce_fmin_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
-  return __builtin_ia32_reduce_fmax_ps512(__V);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
-  return __builtin_ia32_reduce_fmin_ps512(__V);
-}
-
-/// Moves the least significant 32 bits of a vector of [16 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __A
-///    A vector of [16 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_cvtsi512_si32(__m512i __A) {
-  __v16si __b = (__v16si)__A;
-  return __b[0];
-}
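-
-/* Editor's note: a hedged usage sketch (the function name is illustrative);
- * element 0 of a vector whose 16 lanes are all 42 is, of course, 42. */
-static __inline__ int __DEFAULT_FN_ATTRS512
-cvtsi512_si32_example(void) {
-  return _mm512_cvtsi512_si32(_mm512_set1_epi32(42)); /* == 42 */
-}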
-
-/// Loads 8 double-precision (64-bit) floating-point elements from memory
-/// locations starting at \a base_addr, at packed 32-bit integer indices
-/// stored in the lower half of \a vindex and scaled by \a scale, and stores
-/// them in dst.
-///
-/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   dst[i+63:i] := MEM[addr+63:addr]
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_i32logather_pd(vindex, base_addr, scale)                        \
-  _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
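-
-/* Editor's note: a hedged usage sketch for the macro above (function and
- * parameter names are illustrative). With scale 8, each 32-bit index in the
- * low half of __idx selects one 8-byte double from __table. */
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-i32logather_pd_example(__m512i __idx, double const *__table) {
-  return _mm512_i32logather_pd(__idx, __table, 8); /* __table[__idx[j]], j=0..7 */
-}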
-
-/// Loads 8 double-precision (64-bit) floating-point elements from memory
-/// starting at location \a base_addr at packed 32-bit integer indices stored in
-/// the lower half of \a vindex scaled by \a scale into dst using writemask
-/// \a mask (elements are copied from \a src when the corresponding mask bit is
-/// not set).
-///
-/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     dst[i+63:i] := MEM[addr+63:addr]
-///   ELSE
-///     dst[i+63:i] := src[i+63:i]
-///   FI
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale)        \
-  _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex),      \
-                           (base_addr), (scale))
-
-/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
-/// at packed 32-bit integer indices stored in the lower half of \a vindex
-/// scaled by \a scale and stores them in dst.
-///
-/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   dst[i+63:i] := MEM[addr+63:addr]
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_i32logather_epi64(vindex, base_addr, scale)                     \
-  _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
-
-/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
-/// at packed 32-bit integer indices stored in the lower half of \a vindex
-/// scaled by \a scale and stores them in dst using writemask \a mask (elements
-/// are copied from \a src when the corresponding mask bit is not set).
-///
-/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     dst[i+63:i] := MEM[addr+63:addr]
-///   ELSE
-///     dst[i+63:i] := src[i+63:i]
-///   FI
-/// ENDFOR
-/// dst[MAX:512] := 0
-/// \endoperation
-#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale)     \
-  _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex),   \
-                              (base_addr), (scale))
-
-/// Stores the 8 packed double-precision (64-bit) floating-point elements in
-/// \a v1 to memory locations starting at \a base_addr, at packed 32-bit
-/// integer indices stored in \a vindex and scaled by \a scale.
-///
-/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   MEM[addr+63:addr] := v1[i+63:i]
-/// ENDFOR
-/// \endoperation
-#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale)                   \
-  _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1
-/// to memory locations starting at location \a base_addr at packed 32-bit
-/// integer indices stored in \a vindex scaled by \a scale. Only those elements
-/// whose corresponding mask bit is set in writemask \a mask are written to
-/// memory.
-///
-/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     MEM[addr+63:addr] := v1[i+63:i]
-///   FI
-/// ENDFOR
-/// \endoperation
-#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale)        \
-  _mm512_mask_i32scatter_pd((base_addr), (mask),                               \
-                            _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores the 8 packed 64-bit integer elements in \a v1 to memory locations
-/// starting at \a base_addr, at packed 32-bit integer indices stored in
-/// \a vindex and scaled by \a scale.
-///
-/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///   MEM[addr+63:addr] := v1[i+63:i]
-/// ENDFOR
-/// \endoperation
-#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale)                \
-  _mm512_i32scatter_epi64((base_addr),                                         \
-                          _mm512_castsi512_si256(vindex), (v1), (scale))
-
-/// Stores the 8 packed 64-bit integer elements in \a v1 to memory locations
-/// starting at \a base_addr, at packed 32-bit integer indices stored in
-/// \a vindex and scaled by \a scale, using writemask \a mask (elements whose
-/// corresponding mask bit is not set are not written to memory).
-///
-/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
-///
-/// \operation
-/// FOR j := 0 to 7
-///   i := j*64
-///   m := j*32
-///   IF mask[j]
-///     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-///     MEM[addr+63:addr] := v1[i+63:i]
-///   FI
-/// ENDFOR
-/// \endoperation
-#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale)     \
-  _mm512_mask_i32scatter_epi64((base_addr), (mask),                            \
-                               _mm512_castsi512_si256(vindex), (v1), (scale))
-
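-/* Editor's note: a hedged usage sketch for the masked scatter above (function
- * and parameter names are illustrative). Writes __v1[j] to __table[__idx[j]]
- * only for lanes whose bit is set in __m; other lanes leave memory untouched. */
-static __inline__ void __DEFAULT_FN_ATTRS512
-mask_i32loscatter_epi64_example(long long *__table, __mmask8 __m,
-                                __m512i __idx, __m512i __v1) {
-  _mm512_mask_i32loscatter_epi64(__table, __m, __idx, __v1, 8);
-}
-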
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __AVX512FINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
deleted file mode 100644
index 0519dba..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ /dev/null
@@ -1,8445 +0,0 @@
-/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512VLINTRIN_H
-#define __AVX512VLINTRIN_H
-
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))
-
-typedef short __v2hi __attribute__((__vector_size__(4)));
-typedef char __v4qi __attribute__((__vector_size__(4)));
-typedef char __v2qi __attribute__((__vector_size__(2)));
-
-/* Integer compare */
-
-#define _mm_cmpeq_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi32_mask(A, B) \
-    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi32_mask(A, B) \
-    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu32_mask(A, B) \
-    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu32_mask(A, B) \
-    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi64_mask(A, B) \
-    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi64_mask(A, B) \
-    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu64_mask(A, B) \
-    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu64_mask(A, B) \
-    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
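-
-/* Editor's note: a hedged usage sketch of the comparison macros above. Each
- * expands to a _mm*_cmp_*_mask call with the matching _MM_CMPINT_* predicate
- * and yields one mask bit per element, e.g.:
- *
- *   __mmask8 __k = _mm256_cmpeq_epi32_mask(__a, __b); // bit j: __a[j]==__b[j]
- *
- * The resulting mask can feed any of the mask/maskz intrinsics defined below,
- * such as _mm256_mask_add_epi32. */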
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_add_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_add_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_add_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_add_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sub_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sub_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sub_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sub_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_add_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_add_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_add_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_add_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sub_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sub_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sub_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sub_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epi32(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epi32(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epi32(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epi32(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epu32(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_mul_epu32(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epu32(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_mul_epu32(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_mullo_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_mullo_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_mullo_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_mullo_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a & (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_and_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_and_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a & (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_and_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_epi32(__m256i __A, __m256i __B)
-{
-  return (__m256i)(~(__v8su)__A & (__v8su)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                          (__v8si)_mm256_andnot_epi32(__A, __B),
-                                          (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_andnot_epi32(__m128i __A, __m128i __B)
-{
-  return (__m128i)(~(__v4su)__A & (__v4su)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_andnot_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a | (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_or_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_or_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a | (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_or_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a ^ (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_xor_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_xor_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a ^ (__v4su)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_xor_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_and_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_and_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_and_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_epi64(__m256i __A, __m256i __B)
-{
-  return (__m256i)(~(__v4du)__A & (__v4du)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                          (__v4di)_mm256_andnot_epi64(__A, __B),
-                                          (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_andnot_epi64(__m128i __A, __m128i __B)
-{
-  return (__m128i)(~(__v2du)__A & (__v2du)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_andnot_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a | (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_or_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_or_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_or_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a ^ (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_xor_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_xor_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
-        __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_xor_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B);
-}
-
-#define _mm_cmp_epi32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm_cmp_epu32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                          (__v4si)(__m128i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                          (__v4si)(__m128i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_epi32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm256_cmp_epu32_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                          (__v8si)(__m256i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                          (__v8si)(__m256i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                          (__v2di)(__m128i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                          (__v2di)(__m128i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_epi64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1))
-
-#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m)))
-
-#define _mm256_cmp_epu64_mask(a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                          (__v4di)(__m256i)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
-  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                          (__v4di)(__m256i)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_ps_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                          (__v8sf)(__m256)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_ps_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                          (__v8sf)(__m256)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm256_cmp_pd_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                          (__v4df)(__m256d)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm256_mask_cmp_pd_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                          (__v4df)(__m256d)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_ps_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                          (__v4sf)(__m128)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_ps_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                          (__v4sf)(__m128)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-#define _mm_cmp_pd_mask(a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                          (__v2df)(__m128d)(b), (int)(p), \
-                                          (__mmask8)-1))
-
-#define _mm_mask_cmp_pd_mask(m, a, b, p)  \
-  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                          (__v2df)(__m128d)(b), (int)(p), \
-                                          (__mmask8)(m)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B,
-                         __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
-                                                (__v2df) __B,
-                                                -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
-                                                   (__v4df) __B,
-                                                   -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
-                                                (__v4sf) __B,
-                                                -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
-                                                   (__v8sf) __B,
-                                                   -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_add_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_add_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_add_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_add_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_add_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_add_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_add_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_add_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                (__v4si) __W,
-                (__v4si) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                (__v8si) __W,
-                (__v8si) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-                 (__v2df) __W,
-                 (__v2df) __A);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-                 (__v4df) __W,
-                 (__v4df) __A);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-                (__v4sf) __W,
-                (__v4sf) __A);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-                (__v8sf) __W,
-                (__v8sf) __A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                (__v2di) __W,
-                (__v2di) __A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                (__v4di) __W,
-                (__v4di) __A);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
-                  (__v2df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
-                  (__v2df)
-                  _mm_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
-                  (__v4df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
-                  (__v4df)
-                  _mm256_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
-                  (__v2di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
-                  (__v2di)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
-                  (__v4di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
-                  (__v4di)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
-                 (__v4sf) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
-                 (__v8sf) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
-                 (__v8sf)
-                 _mm256_setzero_ps (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
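The compress intrinsics above pack the source lanes whose mask bits are set into the low-order lanes of the result, taking the remaining lanes from __W (mask form) or zero (maskz form). A minimal sketch of the maskz form, assuming an AVX-512VL-capable target compiled with e.g. -mavx512vl:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Lanes are {10, 20, 30, 40}; mask 0x5 = 0b0101 selects lanes 0 and 2. */
  __m128i v = _mm_setr_epi32(10, 20, 30, 40);
  __m128i c = _mm_maskz_compress_epi32(0x5, v);
  int out[4];
  _mm_storeu_si128((__m128i *)out, c);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 10 30 0 0 */
  return 0;
}
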
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
-  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
-            (__v2df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
-  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
-            (__v4df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
-  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
-            (__v2di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
-  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
-            (__v4di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
-  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
-            (__v4sf) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
-  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
-            (__v8sf) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
-  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
-            (__v4si) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
-  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
-            (__v8si) __A,
-            (__mmask8) __U);
-}
-
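The compressstoreu variants above perform the same packing but write the result to memory: only popcount(mask) elements are stored, back to back, at the unaligned destination, and bytes past them are left untouched. An illustrative wrapper under the same AVX-512VL assumptions (store_selected is a hypothetical name):

#include <immintrin.h>

/* Writes the lanes of v whose mask bits are set into dst contiguously;
 * only popcount(mask & 0xF) ints are stored. */
static inline void store_selected(int *dst, __mmask8 mask, __m128i v) {
  _mm_mask_compressstoreu_epi32(dst, mask, v);
}
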
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepi32_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepi32_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepi32_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepi32_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepi32_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepi32_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepi32_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepi32_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
-                (__v4si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
-                (__v4si)
-                _mm_setzero_si128 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvtpd_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvtpd_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
-  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
-            (__v4sf) __W,
-            (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
-  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
-            (__v4sf)
-            _mm_setzero_ps (),
-            (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm256_cvtpd_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm256_cvtpd_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtpd_epu32 (__m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtpd_epu32 (__m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtps_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvtps_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtps_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvtps_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_cvtps_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_cvtps_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_cvtps_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_cvtps_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtps_epu32 (__m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtps_epu32 (__m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvttpd_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm256_cvttpd_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvttpd_epu32 (__m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvttpd_epu32 (__m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
-  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvttps_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_cvttps_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvttps_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_cvttps_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvttps_epu32 (__m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
-  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
-                  (__v4si)
-                  _mm_setzero_si128 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvttps_epu32 (__m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
-  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtepu32_pd (__m128i __A) {
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepu32_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
-                                              (__v2df)_mm_cvtepu32_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_pd (__m128i __A) {
-  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepu32_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
-                                              (__v4df)_mm256_cvtepu32_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtepu32_ps (__m128i __A) {
-  return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepu32_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_cvtepu32_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_ps (__m256i __A) {
-  return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepu32_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_cvtepu32_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
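Note that unlike the float-to-unsigned conversions above, which go through dedicated masked builtins, the unsigned-to-float direction (_mm_cvtepu32_pd, _mm_cvtepu32_ps, and the 256-bit forms) is written as a plain __builtin_convertvector over an unsigned element type, with the mask/maskz variants layered on as a select over the unmasked result. A scalar model of _mm_mask_cvtepu32_ps, for intuition only:

#include <stdint.h>

/* Scalar model: convert each unsigned 32-bit lane to float where the
 * mask bit is set, otherwise keep the corresponding lane of w. */
static void mask_cvtepu32_ps_model(float out[4], const float w[4],
                                   uint8_t mask, const uint32_t a[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = ((mask >> i) & 1) ? (float)a[i] : w[i];
}
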
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_div_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_div_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_div_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_div_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_div_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_div_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_div_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_div_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
-                (__v2di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
-                 (__v2di)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
-                (__v4di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
-                 (__v4di)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
-  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
-              (__v2df) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
-  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
-  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
-              (__v4df) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
-  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
-              (__v2di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
-               (__v2di)
-               _mm_setzero_si128 (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
-             void const *__P) {
-  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
-              (__v4di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
-               (__v4di)
-               _mm256_setzero_si256 (),
-               (__mmask8)
-               __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
-  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
-                   (__v4sf) __W,
-                   (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
-  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
-  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
-                   (__v8sf) __W,
-                   (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
-  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
-              (__v4si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
-  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
-               (__v4si)
-               _mm_setzero_si128 (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
-             void const *__P) {
-  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
-              (__v8si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
-  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
-               (__v8si)
-               _mm256_setzero_si256 (),
-               (__mmask8)
-               __U);
-}
-
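The expandloadu intrinsics above are the memory-source inverse of compressstoreu: they read popcount(mask) consecutive elements from an unaligned pointer and scatter them into the lanes whose mask bits are set, merging (mask) or zeroing (maskz) the rest. A small sketch, again assuming -mavx512vl (expand_two is a hypothetical helper):

#include <immintrin.h>

/* Reads src[0] and src[1] and places them into lanes 1 and 3
 * (mask 0xA = 0b1010); lanes 0 and 2 are zeroed. */
static inline __m128i expand_two(const int *src) {
  return _mm_maskz_expandloadu_epi32(0xA, src);
}
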
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
-                (__v4si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
-  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
-                (__v8si) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
-  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_pd (__m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
-  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_getexp_pd (__m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
-  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ps (__m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
-  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_getexp_ps (__m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
-  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
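Each lane produced by the getexp intrinsics above is the unbiased exponent of the corresponding input, returned as a floating-point value: roughly floor(log2(|x|)) for normal inputs. A scalar approximation using frexp, valid for normal nonzero x only (getexp additionally defines results for zeros, denormals, and NaNs):

#include <math.h>

/* frexp returns x = m * 2^e with 0.5 <= |m| < 1, so the IEEE
 * unbiased exponent floor(log2(|x|)) is e - 1. */
static double getexp_model(double x) {
  int e;
  (void)frexp(x, &e);
  return (double)(e - 1);
}
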
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_max_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_max_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_max_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_max_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_max_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_max_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_max_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_max_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_min_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_min_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_min_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_min_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_min_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_min_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_min_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_min_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_mul_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_mul_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_mul_pd(__A, __B),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_mul_pd(__A, __B),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_mul_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_mul_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_mul_ps(__A, __B),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_mul_ps(__A, __B),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_abs_epi32(__A),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_abs_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_abs_epi32(__A),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_abs_epi32(__A),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_abs_epi64(__A),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_abs_epi64(__A),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_abs_epi64(__A),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_abs_epi64(__A),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epu32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_max_epu32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epu32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_max_epu32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epu64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_max_epu64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epu64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_max_epu64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epu32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm_min_epu32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epu32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                             (__v8si)_mm256_min_epu32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epu64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
-                                             (__v2di)_mm_min_epu64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epu64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                             (__v4di)_mm256_min_epu64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-#define _mm_roundscale_pd(A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1))
-
-
-#define _mm_mask_roundscale_pd(W, U, A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U)))
-
-
-#define _mm_maskz_roundscale_pd(U, A, imm) \
-  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                               (int)(imm), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U)))
-
-
-#define _mm256_roundscale_pd(A, imm) \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)_mm256_setzero_pd(), \
-                                               (__mmask8)-1))
-
-
-#define _mm256_mask_roundscale_pd(W, U, A, imm) \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)(__m256d)(W), \
-                                               (__mmask8)(U)))
-
-
-#define _mm256_maskz_roundscale_pd(U, A, imm)  \
-  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                               (int)(imm), \
-                                               (__v4df)_mm256_setzero_pd(), \
-                                               (__mmask8)(U)))
-
-#define _mm_roundscale_ps(A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1))
-
-
-#define _mm_mask_roundscale_ps(W, U, A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U)))
-
-
-#define _mm_maskz_roundscale_ps(U, A, imm)  \
-  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U)))
-
-#define _mm256_roundscale_ps(A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)_mm256_setzero_ps(), \
-                                              (__mmask8)-1))
-
-#define _mm256_mask_roundscale_ps(W, U, A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)(__m256)(W), \
-                                              (__mmask8)(U)))
-
-
-#define _mm256_maskz_roundscale_ps(U, A, imm)  \
-  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                              (__v8sf)_mm256_setzero_ps(), \
-                                              (__mmask8)(U)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_pd (__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
-        __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_scalef_pd (__m256d __A, __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
-           __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
-  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
-                (__v4df) __B,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ps (__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
-               (__v4sf) __B,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_scalef_ps (__m256 __A, __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
-           __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
-  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
-               (__v8sf) __B,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
-#define _mm_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
-                               (__v2di)(__m128i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
-                               (__v2di)(__m128i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
-                               (int)(scale))
-
-#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
-                               (__v4di)(__m256i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
-                               (__v4di)(__m256i)(index), \
-                               (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
-                               (__v4si)(__m128i)(index), \
-                               (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2df)(__m128d)(v1), (int)(scale))
-
-#define _mm_i32scatter_epi64(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v2di)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i32scatter_pd(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4df)(__m256d)(v1), (int)(scale))
-
-#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4di)(__m256i)(v1), (int)(scale))
-
-#define _mm_i32scatter_ps(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                                 (int)(scale))
-
-#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
-                                 (int)(scale))
-
-#define _mm_i32scatter_epi32(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
-                                 (__v4si)(__m128i)(index), \
-                                 (__v4si)(__m128i)(v1), (int)(scale))
-
-#define _mm256_i32scatter_ps(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
-                                 (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
-                                 (int)(scale))
-
-#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
-                                 (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
-                                 (int)(scale))
-
-#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
-    __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
-                                 (__v8si)(__m256i)(index), \
-                                 (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-    __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
-                                 (__v8si)(__m256i)(index), \
-                                 (__v8si)(__m256i)(v1), (int)(scale))
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
-    return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                                (__v2df)_mm_sqrt_pd(__A),
-                                                (__v2df)__W);
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
-    return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                                (__v2df)_mm_sqrt_pd(__A),
-                                                (__v2df)_mm_setzero_pd());
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
-    return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                                (__v4df)_mm256_sqrt_pd(__A),
-                                                (__v4df)__W);
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
-    return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                                (__v4df)_mm256_sqrt_pd(__A),
-                                                (__v4df)_mm256_setzero_pd());
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
-    return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                               (__v4sf)_mm_sqrt_ps(__A),
-                                               (__v4sf)__W);
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
-    return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                               (__v4sf)_mm_sqrt_ps(__A),
-                                               (__v4sf)_mm_setzero_ps());
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
-    return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                               (__v8sf)_mm256_sqrt_ps(__A),
-                                               (__v8sf)__W);
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
-    return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                               (__v8sf)_mm256_sqrt_ps(__A),
-                                               (__v8sf)_mm256_setzero_ps());
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-    return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                                (__v2df)_mm_sub_pd(__A, __B),
-                                                (__v2df)__W);
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
-    return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                                (__v2df)_mm_sub_pd(__A, __B),
-                                                (__v2df)_mm_setzero_pd());
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
-    return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                                (__v4df)_mm256_sub_pd(__A, __B),
-                                                (__v4df)__W);
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
-    return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                                (__v4df)_mm256_sub_pd(__A, __B),
-                                                (__v4df)_mm256_setzero_pd());
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-    return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                               (__v4sf)_mm_sub_ps(__A, __B),
-                                               (__v4sf)__W);
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
-    return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                               (__v4sf)_mm_sub_ps(__A, __B),
-                                               (__v4sf)_mm_setzero_ps());
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
-    return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                               (__v8sf)_mm256_sub_ps(__A, __B),
-                                               (__v8sf)__W);
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
-    return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                               (__v8sf)_mm256_sub_ps(__A, __B),
-                                               (__v8sf)_mm256_setzero_ps());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) {
-    return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
-                                                  (__v4si)__B);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I,
-                              __m128i __B) {
-    return (__m128i)__builtin_ia32_selectd_128(__U,
-                                    (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                    (__v4si)__A);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U,
-                               __m128i __B) {
-    return (__m128i)__builtin_ia32_selectd_128(__U,
-                                    (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                    (__v4si)__I);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I,
-                               __m128i __B) {
-    return (__m128i)__builtin_ia32_selectd_128(__U,
-                                    (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
-                                    (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) {
-    return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
-                                                  (__v8si) __B);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I,
-                                 __m256i __B) {
-    return (__m256i)__builtin_ia32_selectd_256(__U,
-                                 (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                                 (__v8si)__A);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U,
-                                  __m256i __B) {
-    return (__m256i)__builtin_ia32_selectd_256(__U,
-                                 (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                                 (__v8si)__I);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I,
-                                  __m256i __B) {
-    return (__m256i)__builtin_ia32_selectd_256(__U,
-                                 (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
-                                 (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) {
-    return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
-                                                   (__v2df)__B);
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) {
-    return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                       (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                       (__v2df)__A);
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) {
-    return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                       (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                       (__v2df)(__m128d)__I);
-  }
-
-  static __inline__ __m128d __DEFAULT_FN_ATTRS128
-  _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) {
-    return (__m128d)__builtin_ia32_selectpd_128(__U,
-                                       (__v2df)_mm_permutex2var_pd(__A, __I, __B),
-                                       (__v2df)_mm_setzero_pd());
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) {
-    return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
-                                                   (__v4df)__B);
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I,
-                              __m256d __B) {
-    return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                    (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                    (__v4df)__A);
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U,
-                               __m256d __B) {
-    return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                    (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                    (__v4df)(__m256d)__I);
-  }
-
-  static __inline__ __m256d __DEFAULT_FN_ATTRS256
-  _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I,
-                               __m256d __B) {
-    return (__m256d)__builtin_ia32_selectpd_256(__U,
-                                    (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
-                                    (__v4df)_mm256_setzero_pd());
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) {
-    return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
-                                                  (__v4sf)__B);
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) {
-    return (__m128)__builtin_ia32_selectps_128(__U,
-                                       (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                       (__v4sf)__A);
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) {
-    return (__m128)__builtin_ia32_selectps_128(__U,
-                                       (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                       (__v4sf)(__m128)__I);
-  }
-
-  static __inline__ __m128 __DEFAULT_FN_ATTRS128
-  _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) {
-    return (__m128)__builtin_ia32_selectps_128(__U,
-                                       (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
-                                       (__v4sf)_mm_setzero_ps());
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) {
-    return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
-                                                  (__v8sf) __B);
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) {
-    return (__m256)__builtin_ia32_selectps_256(__U,
-                                    (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                    (__v8sf)__A);
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U,
-                               __m256 __B) {
-    return (__m256)__builtin_ia32_selectps_256(__U,
-                                    (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                    (__v8sf)(__m256)__I);
-  }
-
-  static __inline__ __m256 __DEFAULT_FN_ATTRS256
-  _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I,
-                               __m256 __B) {
-    return (__m256)__builtin_ia32_selectps_256(__U,
-                                    (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
-                                    (__v8sf)_mm256_setzero_ps());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) {
-    return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
-                                                  (__v2di)__B);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I,
-                              __m128i __B) {
-    return (__m128i)__builtin_ia32_selectq_128(__U,
-                                    (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                    (__v2di)__A);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U,
-                               __m128i __B) {
-    return (__m128i)__builtin_ia32_selectq_128(__U,
-                                    (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                    (__v2di)__I);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I,
-                               __m128i __B) {
-    return (__m128i)__builtin_ia32_selectq_128(__U,
-                                    (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
-                                    (__v2di)_mm_setzero_si128());
-  }
-
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) {
-    return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
-                                                  (__v4di) __B);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I,
-                                 __m256i __B) {
-    return (__m256i)__builtin_ia32_selectq_256(__U,
-                                 (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                                 (__v4di)__A);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U,
-                                  __m256i __B) {
-    return (__m256i)__builtin_ia32_selectq_256(__U,
-                                 (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                                 (__v4di)__I);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I,
-                                  __m256i __B) {
-    return (__m256i)__builtin_ia32_selectq_256(__U,
-                                 (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
-                                 (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi8_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi8_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi8_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi8_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi8_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi8_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi8_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi8_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi32_epi64(__X),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi32_epi64(__X),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi32_epi64(__X),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi32_epi64(__X),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi16_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepi16_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi16_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepi16_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi16_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepi16_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi16_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepi16_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu8_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu8_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu8_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu8_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu8_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu8_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu8_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu8_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu32_epi64(__X),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu32_epi64(__X),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu32_epi64(__X),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu32_epi64(__X),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu16_epi32(__A),
-                                               (__v4si)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                               (__v4si)_mm_cvtepu16_epi32(__A),
-                                               (__v4si)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu16_epi32(__A),
-                                               (__v8si)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                               (__v8si)_mm256_cvtepu16_epi32(__A),
-                                               (__v8si)_mm256_setzero_si256());
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu16_epi64(__A),
-                                               (__v2di)__W);
-  }
-
-  static __inline__ __m128i __DEFAULT_FN_ATTRS128
-  _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                               (__v2di)_mm_cvtepu16_epi64(__A),
-                                               (__v2di)_mm_setzero_si128());
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu16_epi64(__A),
-                                               (__v4di)__W);
-  }
-
-  static __inline__ __m256i __DEFAULT_FN_ATTRS256
-  _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-  {
-    return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                               (__v4di)_mm256_cvtepu16_epi64(__A),
-                                               (__v4di)_mm256_setzero_si256());
-  }
-
-
-#define _mm_rol_epi32(a, b) \
-  ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_rol_epi32(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_rol_epi32((a), (b)), \
-                                       (__v4si)(__m128i)(w)))
-
-#define _mm_maskz_rol_epi32(u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_rol_epi32((a), (b)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-#define _mm256_rol_epi32(a, b) \
-  ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_rol_epi32(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_rol_epi32((a), (b)), \
-                                       (__v8si)(__m256i)(w)))
-
-#define _mm256_maskz_rol_epi32(u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_rol_epi32((a), (b)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_rol_epi64(a, b) \
-  ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_rol_epi64(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_rol_epi64((a), (b)), \
-                                       (__v2di)(__m128i)(w)))
-
-#define _mm_maskz_rol_epi64(u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_rol_epi64((a), (b)), \
-                                       (__v2di)_mm_setzero_si128()))
-
-#define _mm256_rol_epi64(a, b) \
-  ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_rol_epi64(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_rol_epi64((a), (b)), \
-                                       (__v4di)(__m256i)(w)))
-
-#define _mm256_maskz_rol_epi64(u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_rol_epi64((a), (b)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rolv_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rolv_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rolv_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rolv_epi32 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rolv_epi32(__A, __B),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rolv_epi32(__A, __B),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rolv_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rolv_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rolv_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rolv_epi64 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rolv_epi64(__A, __B),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rolv_epi64(__A, __B),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-#define _mm_ror_epi32(a, b) \
-  ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_ror_epi32(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_ror_epi32((a), (b)), \
-                                       (__v4si)(__m128i)(w)))
-
-#define _mm_maskz_ror_epi32(u, a, b) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                       (__v4si)_mm_ror_epi32((a), (b)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-#define _mm256_ror_epi32(a, b) \
-  ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_ror_epi32(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_ror_epi32((a), (b)), \
-                                       (__v8si)(__m256i)(w)))
-
-#define _mm256_maskz_ror_epi32(u, a, b) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                       (__v8si)_mm256_ror_epi32((a), (b)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_ror_epi64(a, b) \
-  ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))
-
-#define _mm_mask_ror_epi64(w, u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_ror_epi64((a), (b)), \
-                                       (__v2di)(__m128i)(w)))
-
-#define _mm_maskz_ror_epi64(u, a, b) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                       (__v2di)_mm_ror_epi64((a), (b)), \
-                                       (__v2di)_mm_setzero_si128()))
-
-#define _mm256_ror_epi64(a, b) \
-  ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))
-
-#define _mm256_mask_ror_epi64(w, u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_ror_epi64((a), (b)), \
-                                       (__v4di)(__m256i)(w)))
-
-#define _mm256_maskz_ror_epi64(u, a, b) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                       (__v4di)_mm256_ror_epi64((a), (b)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sll_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sll_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sll_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sll_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_slli_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_slli_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sll_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sll_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sll_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_sll_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_slli_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_slli_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
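Note: every masked shift wrapper in this family has the same shape: compute the plain shift, then blend it against __W (mask) or against zero (maskz) via a __builtin_ia32_select*. A short usage sketch under the same build assumptions as above:

#include <immintrin.h>

/* Shift lanes 0 and 2 left by 4; the mask form keeps the other lanes
 * from src, the maskz form zeroes them. */
static inline __m128i shift_keep_src(__m128i src, __m128i a, __mmask8 keep) {
  return _mm_mask_slli_epi32(src, keep, a, 4);
}

static inline __m128i shift_zero_rest(__m128i a, __mmask8 keep) {
  return _mm_maskz_slli_epi32(keep, a, 4);
}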
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rorv_epi32 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rorv_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__U,
-                                             (__v4si)_mm_rorv_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rorv_epi32 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rorv_epi32(__A, __B),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__U,
-                                            (__v8si)_mm256_rorv_epi32(__A, __B),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_rorv_epi64 (__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rorv_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__U,
-                                             (__v2di)_mm_rorv_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_rorv_epi64 (__m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rorv_epi64(__A, __B),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__U,
-                                            (__v4di)_mm256_rorv_epi64(__A, __B),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sllv_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_sllv_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_sllv_epi64(__X, __Y),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_sllv_epi64(__X, __Y),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sllv_epi32(__X, __Y),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sllv_epi32(__X, __Y),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_sllv_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_sllv_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srlv_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srlv_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_srlv_epi64(__X, __Y),
-                                            (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                            (__v4di)_mm256_srlv_epi64(__X, __Y),
-                                            (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srlv_epi32(__X, __Y),
-                                            (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srlv_epi32(__X, __Y),
-                                            (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srlv_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srlv_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srl_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srl_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srl_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srl_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srli_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srli_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srl_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srl_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srl_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srl_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srli_epi64(__A, __B),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srli_epi64(__A, __B),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srav_epi32(__X, __Y),
-                                            (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                            (__v4si)_mm_srav_epi32(__X, __Y),
-                                            (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srav_epi32(__X, __Y),
-                                            (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                            (__v8si)_mm256_srav_epi32(__X, __Y),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srav_epi64(__X, __Y),
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                             (__v2di)_mm_srav_epi64(__X, __Y),
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srav_epi64(__X, __Y),
-                                             (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                             (__v4di)_mm256_srav_epi64(__X, __Y),
-                                             (__v4di)_mm256_setzero_si256());
-}
-
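Note: _mm_srav_epi64 and _mm256_srav_epi64 have no AVX2 counterpart (VPSRAVQ exists only under AVX-512), which is why the plain forms are defined here alongside the masked ones. Illustrative use, same assumptions:

#include <immintrin.h>

/* Per-lane arithmetic right shift of four 64-bit values; lanes cleared
 * in `live` collapse to zero instead of carrying stale data. */
static inline __m256i scale_down(__m256i vals, __m256i counts, __mmask8 live) {
  return _mm256_maskz_srav_epi64(live, vals, counts);
}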
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                 (__v4si) __A,
-                 (__v4si) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
-                 (__v4si) __A,
-                 (__v4si) _mm_setzero_si128 ());
-}
-
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                 (__v8si) __A,
-                 (__v8si) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
-                 (__v8si) __A,
-                 (__v8si) _mm256_setzero_si256 ());
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_load_epi32 (void const *__P)
-{
-  return *(const __m128i *) __P;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
-              (__v4si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
-              (__v4si)
-              _mm_setzero_si128 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_load_epi32 (void const *__P)
-{
-  return *(const __m256i *) __P;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
-              (__v8si) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
-              (__v8si)
-              _mm256_setzero_si256 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_store_epi32 (void *__P, __m128i __A)
-{
-  *(__m128i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
-          (__v4si) __A,
-          (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_store_epi32 (void *__P, __m256i __A)
-{
-  *(__m256i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
-          (__v8si) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                 (__v2di) __A,
-                 (__v2di) __W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
-                 (__v2di) __A,
-                 (__v2di) _mm_setzero_si128 ());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                 (__v4di) __A,
-                 (__v4di) __W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
-                 (__v4di) __A,
-                 (__v4di) _mm256_setzero_si256 ());
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_load_epi64 (void const *__P)
-{
-  return *(const __m128i *) __P;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
-              (__v2di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
-              (__v2di)
-              _mm_setzero_si128 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_load_epi64 (void const *__P)
-{
-  return *(const __m256i *) __P;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
-              (__v4di) __W,
-              (__mmask8)
-              __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
-              (__v4di)
-              _mm256_setzero_si256 (),
-              (__mmask8)
-              __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_store_epi64 (void *__P, __m128i __A)
-{
-  *(__m128i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
-          (__v2di) __A,
-          (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_store_epi64 (void *__P, __m256i __A)
-{
-  *(__m256i *) __P = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
-          (__v4di) __A,
-          (__mmask8) __U);
-}
-
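Note: the movdqa32/movdqa64 builtins above are the aligned forms; as far as the architecture is concerned the pointer still has to be vector-aligned even though masked-off lanes are never accessed. A sketch assuming 32-byte-aligned buffers:

#include <immintrin.h>

/* Copy only the selected 64-bit lanes; unselected lanes of dst keep
 * their previous contents. Both pointers must be 32-byte aligned. */
static inline void copy_lanes(long long *dst, const long long *src, __mmask8 m) {
  __m256i v = _mm256_mask_load_epi64(_mm256_setzero_si256(), m, src);
  _mm256_mask_store_epi64(dst, m, v);
}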
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_movedup_pd(__A),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_movedup_pd(__A),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_movedup_pd(__A),
-                                              (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                              (__v4df)_mm256_movedup_pd(__A),
-                                              (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
-{
-   return (__m128i)__builtin_ia32_selectd_128(__M,
-                                              (__v4si) _mm_set1_epi32(__A),
-                                              (__v4si)__O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi32( __mmask8 __M, int __A)
-{
-   return (__m128i)__builtin_ia32_selectd_128(__M,
-                                              (__v4si) _mm_set1_epi32(__A),
-                                              (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
-{
-   return (__m256i)__builtin_ia32_selectd_256(__M,
-                                              (__v8si) _mm256_set1_epi32(__A),
-                                              (__v8si)__O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi32( __mmask8 __M, int __A)
-{
-   return (__m256i)__builtin_ia32_selectd_256(__M,
-                                              (__v8si) _mm256_set1_epi32(__A),
-                                              (__v8si)_mm256_setzero_si256());
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128(__M,
-                                              (__v2di) _mm_set1_epi64x(__A),
-                                              (__v2di) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
-{
-  return (__m128i) __builtin_ia32_selectq_128(__M,
-                                              (__v2di) _mm_set1_epi64x(__A),
-                                              (__v2di) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
-{
-  return (__m256i) __builtin_ia32_selectq_256(__M,
-                                              (__v4di) _mm256_set1_epi64x(__A),
-                                              (__v4di) __O) ;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
-{
-   return (__m256i) __builtin_ia32_selectq_256(__M,
-                                               (__v4di) _mm256_set1_epi64x(__A),
-                                               (__v4di) _mm256_setzero_si256());
-}
-
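Note: the masked set1 forms compose a broadcast with the same select blend used throughout this header. Sketch:

#include <immintrin.h>

/* Overwrite only the lanes named by `where` with a broadcast constant,
 * leaving the rest of v intact. */
static inline __m256i stamp_value(__m256i v, __mmask8 where, int value) {
  return _mm256_mask_set1_epi32(v, where, value);
}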
-#define _mm_fixupimm_pd(A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)-1))
-
-#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2di)(__m128i)(C), \
-                                               (int)(imm), (__mmask8)(U)))
-
-#define _mm256_fixupimm_pd(A, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                              (__v4df)(__m256d)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)-1))
-
-#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                              (__v4df)(__m256d)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
-  ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
-                                               (__v4df)(__m256d)(B), \
-                                               (__v4di)(__m256i)(C), \
-                                               (int)(imm), (__mmask8)(U)))
-
-#define _mm_fixupimm_ps(A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_fixupimm_ps(A, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                             (__v8sf)(__m256)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                             (__v8sf)(__m256)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
-  ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
-                                              (__v8sf)(__m256)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
-               (__v2df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_pd (__mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
-               (__v4df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
-              (__v4sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ps (__mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
-              (__v8sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
-                 (__v2di) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
-                 (__v2di)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
-                 (__v4di) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
-                 (__v4di)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
-                 (__v4si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
-                 (__v4si)
-                 _mm_setzero_si128 (),
-                 (__mmask8) __U);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
-                 (__v8si) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
-{
-  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
-               (__v2df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
-{
-  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
-               (__v2df)
-               _mm_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
-               (__v4df) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
-{
-  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
-               (__v4df)
-               _mm256_setzero_pd (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
-              (__v4sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
-{
-  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
-              (__v4sf)
-              _mm_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
-              (__v8sf) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
-{
-  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
-              (__v8sf)
-              _mm256_setzero_ps (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
-           (__v2df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
-{
-  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
-           (__v4df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
-           (__v4sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
-{
-  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
-           (__v8sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi64 (void *__P, __m128i __A)
-{
-  struct __storeu_epi64 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
-             (__v2di) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi64 (void *__P, __m256i __A)
-{
-  struct __storeu_epi64 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
-             (__v4di) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi32 (void *__P, __m128i __A)
-{
-  struct __storeu_epi32 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
-{
-  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
-             (__v4si) __A,
-             (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi32 (void *__P, __m256i __A)
-{
-  struct __storeu_epi32 {
-    __m256i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
-{
-  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
-             (__v8si) __A,
-             (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
-           (__v2df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
-{
-  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
-           (__v4df) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
-           (__v4sf) __A,
-           (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
-{
-  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
-           (__v8sf) __A,
-           (__mmask8) __U);
-}
-
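Note: the unaligned masked loads and stores above are what make clean loop tails possible: a maskz load with a partial mask touches only the live elements, so the final partial vector never faults past the end of the buffer. Sketch under the same assumptions:

#include <immintrin.h>

/* Sum an arbitrary-length float array; the tail is handled with a
 * masked load instead of a scalar remainder loop. */
static inline float sum_floats(const float *p, int n) {
  __m256 acc = _mm256_setzero_ps();
  int i = 0;
  for (; i + 8 <= n; i += 8)
    acc = _mm256_add_ps(acc, _mm256_loadu_ps(p + i));
  __mmask8 tail = (__mmask8)((1u << (n - i)) - 1);  /* n - i is 0..7 */
  acc = _mm256_add_ps(acc, _mm256_maskz_loadu_ps(tail, p + i));
  float out[8], s = 0.0f;
  _mm256_storeu_ps(out, acc);
  for (int k = 0; k < 8; ++k)
    s += out[k];
  return s;
}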
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpackhi_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpackhi_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
-                                           (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpackhi_pd(__A, __B),
-                                           (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpackhi_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
-                                           (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpackhi_ps(__A, __B),
-                                           (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpacklo_pd(__A, __B),
-                                              (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                              (__v2df)_mm_unpacklo_pd(__A, __B),
-                                              (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
-                                           (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                           (__v4df)_mm256_unpacklo_pd(__A, __B),
-                                           (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_unpacklo_ps(__A, __B),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
-                                           (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                           (__v8sf)_mm256_unpacklo_ps(__A, __B),
-                                           (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_pd (__m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
-                (__v2df)
-                _mm_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_rcp14_pd (__m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
-                (__v4df)
-                _mm256_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ps (__m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
-               (__v4sf)
-               _mm_setzero_ps (),
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_rcp14_ps (__m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf) __W,
-               (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
-               (__v8sf)
-               _mm256_setzero_ps (),
-               (__mmask8) __U);
-}
-
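Note: rcp14 trades accuracy for speed, returning a reciprocal correct to roughly 2^-14 relative error. When more precision is needed, one Newton-Raphson step, x' = x*(2 - a*x), roughly doubles the good bits. Sketch:

#include <immintrin.h>

/* Approximate 1/a with rcp14, then refine once with Newton-Raphson. */
static inline __m256 fast_recip(__m256 a) {
  __m256 x = _mm256_rcp14_ps(a);
  __m256 ax = _mm256_mul_ps(a, x);
  return _mm256_mul_ps(x, _mm256_sub_ps(_mm256_set1_ps(2.0f), ax));
}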
-#define _mm_mask_permute_pd(W, U, X, C) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_permute_pd((X), (C)), \
-                                        (__v2df)(__m128d)(W)))
-
-#define _mm_maskz_permute_pd(U, X, C) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_permute_pd((X), (C)), \
-                                        (__v2df)_mm_setzero_pd()))
-
-#define _mm256_mask_permute_pd(W, U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permute_pd((X), (C)), \
-                                        (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_permute_pd(U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permute_pd((X), (C)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm_mask_permute_ps(W, U, X, C) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_permute_ps((X), (C)), \
-                                       (__v4sf)(__m128)(W)))
-
-#define _mm_maskz_permute_ps(U, X, C) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_permute_ps((X), (C)), \
-                                       (__v4sf)_mm_setzero_ps()))
-
-#define _mm256_mask_permute_ps(W, U, X, C) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_permute_ps((X), (C)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_permute_ps(U, X, C) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_permute_ps((X), (C)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                            (__v2df)_mm_permutevar_pd(__A, __C),
-                                            (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
-{
-  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
-                                            (__v2df)_mm_permutevar_pd(__A, __C),
-                                            (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                         (__v4df)_mm256_permutevar_pd(__A, __C),
-                                         (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                         (__v4df)_mm256_permutevar_pd(__A, __C),
-                                         (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                            (__v4sf)_mm_permutevar_ps(__A, __C),
-                                            (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                            (__v4sf)_mm_permutevar_ps(__A, __C),
-                                            (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                          (__v8sf)_mm256_permutevar_ps(__A, __C),
-                                          (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                          (__v8sf)_mm256_permutevar_ps(__A, __C),
-                                          (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi32_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi32_mask (__U, _mm_and_si128 (__A, __B),
-                                     _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_test_epi32_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi32_mask (_mm256_and_si256 (__A, __B),
-                                   _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
-                                        _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi64_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpneq_epi64_mask (__U, _mm_and_si128 (__A, __B),
-                                     _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_test_epi64_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpneq_epi64_mask (_mm256_and_si256 (__A, __B),
-                                   _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpneq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
-                                        _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi32_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi32_mask (__U, _mm_and_si128 (__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi32_mask (_mm256_and_si256 (__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
-                                       _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi64_mask (__m128i __A, __m128i __B)
-{
-  return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return _mm_mask_cmpeq_epi64_mask (__U, _mm_and_si128 (__A, __B),
-                                    _mm_setzero_si128());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
-{
-  return _mm256_cmpeq_epi64_mask (_mm256_and_si256 (__A, __B),
-                                  _mm256_setzero_si256());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return _mm256_mask_cmpeq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
-                                       _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
-                                           (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpackhi_epi32(__A, __B),
-                                           (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
-                                        (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpackhi_epi32(__A, __B),
-                                        (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpackhi_epi64(__A, __B),
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpackhi_epi64(__A, __B),
-                                        (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
-                                           (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                           (__v4si)_mm_unpacklo_epi32(__A, __B),
-                                           (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
-                                        (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                        (__v8si)_mm256_unpacklo_epi32(__A, __B),
-                                        (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
-                                           (__v2di)_mm_unpacklo_epi64(__A, __B),
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
-                                        (__v4di)_mm256_unpacklo_epi64(__A, __B),
-                                        (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sra_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_sra_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sra_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_sra_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
-                                             (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
-                                             (__v4si)_mm_srai_epi32(__A, __B),
-                                             (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
-                                             (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
-                                             (__v8si)_mm256_srai_epi32(__A, __B),
-                                             (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sra_epi64(__m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                             (__v2di)_mm_sra_epi64(__A, __B), \
-                                             (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                             (__v2di)_mm_sra_epi64(__A, __B), \
-                                             (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi64(__m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                           (__v4di)_mm256_sra_epi64(__A, __B), \
-                                           (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                           (__v4di)_mm256_sra_epi64(__A, __B), \
-                                           (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                           (__v2di)_mm_srai_epi64(__A, __imm), \
-                                           (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
-{
-  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
-                                           (__v2di)_mm_srai_epi64(__A, __imm), \
-                                           (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
-                       unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                        (__v4di)_mm256_srai_epi64(__A, __imm), \
-                                        (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
-                                        (__v4di)_mm256_srai_epi64(__A, __imm), \
-                                        (__v4di)_mm256_setzero_si256());
-}
-
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                             (__v4si)(__m128i)(B), \
-                                             (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
-                                              (__v4si)(__m128i)(B), \
-                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                             (__v8si)(__m256i)(B), \
-                                             (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
-                                              (__v8si)(__m256i)(B), \
-                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                             (__v2di)(__m128i)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
-                                              (__v2di)(__m128i)(B), \
-                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                             (__v4di)(__m256i)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
-                                              (__v4di)(__m256i)(B), \
-                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                              (__mmask8)(U)))
-
-
-
-#define _mm256_shuffle_f32x4(A, B, imm) \
-  ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
-                                         (__v8sf)(__m256)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
-#define _mm256_shuffle_f64x2(A, B, imm) \
-  ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
-                                          (__v4df)(__m256d)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v4df)_mm256_setzero_pd()))
-
-#define _mm256_shuffle_i32x4(A, B, imm) \
-  ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
-                                          (__v8si)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm256_shuffle_i64x2(A, B, imm) \
-  ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
-                                          (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v4di)(__m256i)(W)))
-
-
-#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                       (__v4di)_mm256_setzero_si256()))
-
-#define _mm_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                        (__v2df)(__m128d)(W)))
-
-#define _mm_maskz_shuffle_pd(U, A, B, M) \
-  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                        (__v2df)_mm_setzero_pd()))
-
-#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                        (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_shuffle_pd(U, A, B, M) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                       (__v4sf)(__m128)(W)))
-
-#define _mm_maskz_shuffle_ps(U, A, B, M) \
-  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                       (__v4sf)_mm_setzero_ps()))
-
-#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                       (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_shuffle_ps(U, A, B, M) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                       (__v8sf)_mm256_setzero_ps()))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_pd (__m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
-                 (__v2df)
-                 _mm_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_rsqrt14_pd (__m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df) __W,
-                 (__mmask8) __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
-                 (__v4df)
-                 _mm256_setzero_pd (),
-                 (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ps (__m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
-                (__v4sf)
-                _mm_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_rsqrt14_ps (__m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
-                (__v8sf)
-                _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcast_f32x4(__m128 __A)
-{
-  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
-                                            (__v8sf)_mm256_broadcast_f32x4(__A),
-                                            (__v8sf)__O);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
-                                            (__v8sf)_mm256_broadcast_f32x4(__A),
-                                            (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcast_i32x4(__m128i __A)
-{
-  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                            (__v8si)_mm256_broadcast_i32x4(__A),
-                                            (__v8si)__O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                            (__v8si)_mm256_broadcast_i32x4(__A),
-                                            (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256(__M,
-                                              (__v4df) _mm256_broadcastsd_pd(__A),
-                                              (__v4df) __O);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m256d)__builtin_ia32_selectpd_256(__M,
-                                              (__v4df) _mm256_broadcastsd_pd(__A),
-                                              (__v4df) _mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128(__M,
-                                             (__v4sf) _mm_broadcastss_ps(__A),
-                                             (__v4sf) __O);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128(__M,
-                                             (__v4sf) _mm_broadcastss_ps(__A),
-                                             (__v4sf) _mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256(__M,
-                                             (__v8sf) _mm256_broadcastss_ps(__A),
-                                             (__v8sf) __O);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256(__M,
-                                             (__v8sf) _mm256_broadcastss_ps(__A),
-                                             (__v8sf) _mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__M,
-                                             (__v4si) _mm_broadcastd_epi32(__A),
-                                             (__v4si) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128(__M,
-                                             (__v4si) _mm_broadcastd_epi32(__A),
-                                             (__v4si) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__M,
-                                             (__v8si) _mm256_broadcastd_epi32(__A),
-                                             (__v8si) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectd_256(__M,
-                                             (__v8si) _mm256_broadcastd_epi32(__A),
-                                             (__v8si) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__M,
-                                             (__v2di) _mm_broadcastq_epi64(__A),
-                                             (__v2di) __O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i)__builtin_ia32_selectq_128(__M,
-                                             (__v2di) _mm_broadcastq_epi64(__A),
-                                             (__v2di) _mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__M,
-                                             (__v4di) _mm256_broadcastq_epi64(__A),
-                                             (__v4di) __O);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m256i)__builtin_ia32_selectq_256(__M,
-                                             (__v4di) _mm256_broadcastq_epi64(__A),
-                                             (__v4di) _mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi32_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi32_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi32_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi)_mm_setzero_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi)__O,
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi32_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi32 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
-               (__v4si) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi32 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si)__O,
-               __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
-               (__v4si) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi64_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi64_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi)_mm_undefined_si128(),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi32_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi32_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi32_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi32_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) _mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi8 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi8 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi32 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
-                (__v4si) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi32 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
-                (__v4si) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi64_epi16 (__m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi64_epi16 (__m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi)_mm_undefined_si128(),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi8 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
-              (__v16qi)
-              _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi8 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v8si)__A, __v8qi),
-      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-      12, 13, 14, 15);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi16 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi16 (__m256i __A)
-{
-  return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi8 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi8 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi32 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
-              (__v4si) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
-              (__v4si) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi32 (__m256i __A)
-{
-  return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm256_cvtepi64_epi32(__A),
-                                             (__v4si)__O);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
-                                             (__v4si)_mm256_cvtepi64_epi32(__A),
-                                             (__v4si)_mm_setzero_si128());
-}
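The mask/maskz pairs in this stretch all follow one pattern: run the unmasked operation, then do a per-lane select against the passthrough (or zero). The same composition can be written with public intrinsics; a sketch assuming -mavx512vl, with an illustrative helper name:

#include <immintrin.h>

static inline __m128i mask_cvtepi64_epi32_equiv(__m128i old, __mmask8 k,
                                                __m256i a) {
  __m128i full = _mm256_cvtepi64_epi32(a);     /* unmasked conversion */
  return _mm_mask_blend_epi32(k, old, full);   /* per lane: k ? full : old */
}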
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi16 (__m128i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
-      3, 3, 3, 3);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
-              (__v8hi)__O,
-              __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
-{
-  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi16 (__m256i __A)
-{
-  return (__m128i)__builtin_shufflevector(
-      __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
-      2, 3, 4, 5, 6, 7);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
-{
-  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
-}
-
-#define _mm256_extractf32x4_ps(A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)_mm_undefined_ps(), \
-                                                (__mmask8)-1))
-
-#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U)))
-
-#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
-  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                                (int)(imm), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U)))
-
-#define _mm256_extracti32x4_epi32(A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)_mm_undefined_si128(), \
-                                                 (__mmask8)-1))
-
-#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)(__m128i)(W), \
-                                                 (__mmask8)(U)))
-
-#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
-  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                 (int)(imm), \
-                                                 (__v4si)_mm_setzero_si128(), \
-                                                 (__mmask8)(U)))
-
-#define _mm256_insertf32x4(A, B, imm) \
-  ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
-                                          (__v4sf)(__m128)(B), (int)(imm)))
-
-#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                  (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)(__m256)(W)))
-
-#define _mm256_maskz_insertf32x4(U, A, B, imm) \
-  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                  (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)_mm256_setzero_ps()))
-
-#define _mm256_inserti32x4(A, B, imm) \
-  ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
-                                           (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                  (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_inserti32x4(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                  (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)_mm256_setzero_si256()))
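The extract/insert macros take a compile-time lane index: imm selects which 128-bit half of the 256-bit vector is read or overwritten. A brief sketch under the same -mavx512vl assumption (helper names illustrative):

#include <immintrin.h>

__m128 upper_lane(__m256 v) {
  return _mm256_extractf32x4_ps(v, 1);   /* imm = 1: elements 4..7 */
}

__m256 replace_lower(__m256 v, __m128 x) {
  return _mm256_insertf32x4(v, x, 0);    /* imm = 0: overwrite elements 0..3 */
}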
-
-#define _mm_getmant_pd(A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm_mask_getmant_pd(W, U, A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)(__m128d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm_maskz_getmant_pd(U, A, B, C) \
-  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm256_getmant_pd(A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)-1))
-
-#define _mm256_mask_getmant_pd(W, U, A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)(__m256d)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_getmant_pd(U, A, B, C) \
-  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                             (int)(((C)<<2) | (B)), \
-                                             (__v4df)_mm256_setzero_pd(), \
-                                             (__mmask8)(U)))
-
-#define _mm_getmant_ps(A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm_mask_getmant_ps(W, U, A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)(__m128)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm_maskz_getmant_ps(U, A, B, C) \
-  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(U)))
-
-#define _mm256_getmant_ps(A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)_mm256_setzero_ps(), \
-                                            (__mmask8)-1))
-
-#define _mm256_mask_getmant_ps(W, U, A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)(__m256)(W), \
-                                            (__mmask8)(U)))
-
-#define _mm256_maskz_getmant_ps(U, A, B, C) \
-  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8sf)_mm256_setzero_ps(), \
-                                            (__mmask8)(U)))
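In the getmant macros, B selects the normalization interval and C the sign control; the (C<<2)|B expression packs both into the single immediate the instruction expects. For example, assuming -mavx512f -mavx512vl:

#include <immintrin.h>

/* Extract mantissas normalized to [1, 2) with the sign forced to zero. */
__m128d mantissas(__m128d v) {
  return _mm_getmant_pd(v, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero);
}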
-
-#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v2di)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v2di)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4di)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4di)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v4si)(__m128i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale)))
-
-#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
-                                         (void const *)(addr), \
-                                         (__v8si)(__m256i)(index), \
-                                         (__mmask8)(mask), (int)(scale)))
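For the masked gathers, lanes whose mask bit is set are loaded from addr + index*scale; lanes with a clear bit keep the value from v1_old. A sketch, again assuming -mavx512vl (helper name illustrative):

#include <immintrin.h>

/* Gather four floats at base[idx[i]]; scale = sizeof(float). */
__m128 gather4(const float *base, __m128i idx, __mmask8 k, __m128 src) {
  return _mm_mmask_i32gather_ps(src, k, idx, base, 4);
}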
-
-#define _mm256_permutex_pd(X, C) \
-  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))
-
-#define _mm256_mask_permutex_pd(W, U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_permutex_pd((X), (C)), \
-                                       (__v4df)(__m256d)(W)))
-
-#define _mm256_maskz_permutex_pd(U, X, C) \
-  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                        (__v4df)_mm256_permutex_pd((X), (C)), \
-                                        (__v4df)_mm256_setzero_pd()))
-
-#define _mm256_permutex_epi64(X, C) \
-  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))
-
-#define _mm256_mask_permutex_epi64(W, U, X, C) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)(__m256i)(W)))
-
-#define _mm256_maskz_permutex_epi64(U, X, C) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)_mm256_setzero_si256()))
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
-          __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                        (__v4df)_mm256_permutexvar_pd(__X, __Y),
-                                        (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
-{
-  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
-                                        (__v4df)_mm256_permutexvar_pd(__X, __Y),
-                                        (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_permvardi256((__v4di) __Y, (__v4di) __X);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                     (__v4di)_mm256_permutexvar_epi64(__X, __Y),
-                                     (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
-             __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
-                                     (__v4di)_mm256_permutexvar_epi64(__X, __Y),
-                                     (__v4di)__W);
-}
-
-#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_permutexvar_ps(__X, __Y),
-                                        (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                        (__v8sf)_mm256_permutexvar_ps(__X, __Y),
-                                        (__v8sf)_mm256_setzero_ps());
-}
-
-#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X,
-                              __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                     (__v8si)_mm256_permutexvar_epi32(__X, __Y),
-                                     (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
-                                     (__v8si)_mm256_permutexvar_epi32(__X, __Y),
-                                     (__v8si)_mm256_setzero_si256());
-}
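Note the two permute flavors: _mm256_permutex_* encodes the source lane for every destination lane in a compile-time control byte, while _mm256_permutexvar_* reads per-lane indices from a vector operand. A sketch of both, reversing the four 64-bit lanes (assumes -mavx512vl):

#include <immintrin.h>

__m256d reverse_imm(__m256d v) {
  return _mm256_permutex_pd(v, 0x1b);          /* fields 00 01 10 11 -> lanes 3,2,1,0 */
}

__m256i reverse_var(__m256i v) {
  __m256i idx = _mm256_setr_epi64x(3, 2, 1, 0);
  return _mm256_permutexvar_epi64(idx, v);     /* indices first, data second */
}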
-
-#define _mm_alignr_epi32(A, B, imm) \
-  ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
-                                     (__v4si)(__m128i)(B), (int)(imm)))
-
-#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                    (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                    (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)_mm_setzero_si128()))
-
-#define _mm256_alignr_epi32(A, B, imm) \
-  ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
-                                     (__v8si)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                 (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                 (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)_mm256_setzero_si256()))
-
-#define _mm_alignr_epi64(A, B, imm) \
-  ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
-                                     (__v2di)(__m128i)(B), (int)(imm)))
-
-#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                    (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                    (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)_mm_setzero_si128()))
-
-#define _mm256_alignr_epi64(A, B, imm) \
-  ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
-                                     (__v4di)(__m256i)(B), (int)(imm)))
-
-#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                 (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
-  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                 (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)_mm256_setzero_si256()))
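VALIGND/VALIGNQ concatenate the two sources (A in the upper half), shift right by imm elements, and keep the low half, which makes them handy for cross-lane rotates. Sketch (assumes -mavx512vl; helper name illustrative):

#include <immintrin.h>

/* imm = 1 yields { b[1], b[2], b[3], a[0] }. */
__m128i shift_in_one(__m128i a, __m128i b) {
  return _mm_alignr_epi32(a, b, 1);
}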
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_movehdup_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_movehdup_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_movehdup_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_movehdup_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_moveldup_ps(__A),
-                                             (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
-                                             (__v4sf)_mm_moveldup_ps(__A),
-                                             (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_moveldup_ps(__A),
-                                             (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
-                                             (__v8sf)_mm256_moveldup_ps(__A),
-                                             (__v8sf)_mm256_setzero_ps());
-}
-
-#define _mm256_mask_shuffle_epi32(W, U, A, I) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                       (__v8si)(__m256i)(W)))
-
-#define _mm256_maskz_shuffle_epi32(U, A, I) \
-  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                       (__v8si)_mm256_setzero_si256()))
-
-#define _mm_mask_shuffle_epi32(W, U, A, I) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                       (__v4si)(__m128i)(W)))
-
-#define _mm_maskz_shuffle_epi32(U, A, I) \
-  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                       (__v4si)_mm_setzero_si128()))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-              (__v2df) __A,
-              (__v2df) __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
-{
-  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
-              (__v2df) __A,
-              (__v2df) _mm_setzero_pd ());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-              (__v4df) __A,
-              (__v4df) __W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
-{
-  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
-              (__v4df) __A,
-              (__v4df) _mm256_setzero_pd ());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-             (__v4sf) __A,
-             (__v4sf) __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
-{
-  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
-             (__v4sf) __A,
-             (__v4sf) _mm_setzero_ps ());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-             (__v8sf) __A,
-             (__v8sf) __W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
-{
-  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
-             (__v8sf) __A,
-             (__v8sf) _mm256_setzero_ps ());
-}
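mask_mov/maskz_mov reduce to a pure per-lane blend, useful as the final merge step of a hand-masked computation. Sketch (assumes -mavx512vl):

#include <immintrin.h>

/* Per lane: k ? src : dst. */
__m256 merge(__m256 dst, __m256 src, __mmask8 k) {
  return _mm256_mask_mov_ps(dst, k, src);
}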
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
-{
-  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
-             (__v4sf) __W,
-             (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
-{
-  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
-             (__v4sf) _mm_setzero_ps (),
-             (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
-{
-  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
-                (__v8sf) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
-{
-  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U);
-}
-
-#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                          (__v8hi)(__m128i)(W), \
-                                          (__mmask8)(U)))
-
-#define _mm_maskz_cvt_roundps_ph(U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                          (__v8hi)_mm_setzero_si128(), \
-                                          (__mmask8)(U)))
-
-#define _mm_mask_cvtps_ph  _mm_mask_cvt_roundps_ph
-#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph
-
-#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                             (__v8hi)(__m128i)(W), \
-                                             (__mmask8)(U)))
-
-#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
-  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                             (__v8hi)_mm_setzero_si128(), \
-                                             (__mmask8)(U)))
-
-#define _mm256_mask_cvtps_ph  _mm256_mask_cvt_roundps_ph
-#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
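The cvtph/cvtps pairs convert between binary16 storage and float lanes, with cvtps_ph taking an explicit rounding immediate. A round-trip sketch using the unmasked F16C forms (assumes -mf16c or -mavx512vl):

#include <immintrin.h>

/* Round eight floats to half precision and back (round-to-nearest-even). */
__m256 roundtrip_f16(__m256 v) {
  __m128i h = _mm256_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  return _mm256_cvtph_ps(h);
}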
-
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif /* __AVX512VLINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/cetintrin.h b/linux-x86/lib64/clang/14.0.2/include/cetintrin.h
deleted file mode 100644
index 4290e9d..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*===---- cetintrin.h - CET intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <cetintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __CETINTRIN_H
-#define __CETINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS                                                     \
-  __attribute__((__always_inline__, __nodebug__, __target__("shstk")))
-
-static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
-  __builtin_ia32_incsspd(__a);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) {
-  __builtin_ia32_incsspq(__a);
-}
-#endif /* __x86_64__ */
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspq(__a);
-}
-#else /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
-  __builtin_ia32_incsspd((int)__a);
-}
-#endif /* __x86_64__ */
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
-  return __builtin_ia32_rdsspd(__a);
-}
-
-#ifdef __x86_64__
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
-  return __builtin_ia32_rdsspq(__a);
-}
-#endif /* __x86_64__ */
-
-#ifdef __x86_64__
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) {
-  return __builtin_ia32_rdsspq(0);
-}
-#else /* __x86_64__ */
-static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
-  return __builtin_ia32_rdsspd(0);
-}
-#endif /* __x86_64__ */
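On parts without CET enabled, RDSSP executes as a no-op, so _get_ssp() returns its zero seed; with shadow stacks active it returns the current shadow-stack pointer. A sketch assuming an x86-64 target built with -mshstk:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned long long ssp = _get_ssp();   /* 0 unless the OS enabled CET */
  printf("shadow stack pointer: %#llx\n", ssp);
  return 0;
}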
-
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
-  __builtin_ia32_saveprevssp();
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _rstorssp(void * __p) {
-  __builtin_ia32_rstorssp(__p);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _wrssd(unsigned int __a, void * __p) {
-  __builtin_ia32_wrssd(__a, __p);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _wrssq(unsigned long long __a, void * __p) {
-  __builtin_ia32_wrssq(__a, __p);
-}
-#endif /* __x86_64__ */
-
-static __inline__ void __DEFAULT_FN_ATTRS _wrussd(unsigned int __a, void * __p) {
-  __builtin_ia32_wrussd(__a, __p);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void * __p) {
-  __builtin_ia32_wrussq(__a, __p);
-}
-#endif /* __x86_64__ */
-
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
-  __builtin_ia32_setssbsy();
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS _clrssbsy(void * __p) {
-  __builtin_ia32_clrssbsy(__p);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __CETINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/cpuid.h b/linux-x86/lib64/clang/14.0.2/include/cpuid.h
deleted file mode 100644
index 6df1b4a..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/cpuid.h
+++ /dev/null
@@ -1,320 +0,0 @@
-/*===---- cpuid.h - X86 cpu model detection --------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CPUID_H
-#define __CPUID_H
-
-#if !(__x86_64__ || __i386__)
-#error this header is for x86 only
-#endif
-
-/* Responses to the identification request with %eax 0 */
-/* AMD:     "AuthenticAMD" */
-#define signature_AMD_ebx 0x68747541
-#define signature_AMD_edx 0x69746e65
-#define signature_AMD_ecx 0x444d4163
-/* CENTAUR: "CentaurHauls" */
-#define signature_CENTAUR_ebx 0x746e6543
-#define signature_CENTAUR_edx 0x48727561
-#define signature_CENTAUR_ecx 0x736c7561
-/* CYRIX:   "CyrixInstead" */
-#define signature_CYRIX_ebx 0x69727943
-#define signature_CYRIX_edx 0x736e4978
-#define signature_CYRIX_ecx 0x64616574
-/* HYGON:   "HygonGenuine" */
-#define signature_HYGON_ebx 0x6f677948
-#define signature_HYGON_edx 0x6e65476e
-#define signature_HYGON_ecx 0x656e6975
-/* INTEL:   "GenuineIntel" */
-#define signature_INTEL_ebx 0x756e6547
-#define signature_INTEL_edx 0x49656e69
-#define signature_INTEL_ecx 0x6c65746e
-/* TM1:     "TransmetaCPU" */
-#define signature_TM1_ebx 0x6e617254
-#define signature_TM1_edx 0x74656d73
-#define signature_TM1_ecx 0x55504361
-/* TM2:     "GenuineTMx86" */
-#define signature_TM2_ebx 0x756e6547
-#define signature_TM2_edx 0x54656e69
-#define signature_TM2_ecx 0x3638784d
-/* NSC:     "Geode by NSC" */
-#define signature_NSC_ebx 0x646f6547
-#define signature_NSC_edx 0x79622065
-#define signature_NSC_ecx 0x43534e20
-/* NEXGEN:  "NexGenDriven" */
-#define signature_NEXGEN_ebx 0x4778654e
-#define signature_NEXGEN_edx 0x72446e65
-#define signature_NEXGEN_ecx 0x6e657669
-/* RISE:    "RiseRiseRise" */
-#define signature_RISE_ebx 0x65736952
-#define signature_RISE_edx 0x65736952
-#define signature_RISE_ecx 0x65736952
-/* SIS:     "SiS SiS SiS " */
-#define signature_SIS_ebx 0x20536953
-#define signature_SIS_edx 0x20536953
-#define signature_SIS_ecx 0x20536953
-/* UMC:     "UMC UMC UMC " */
-#define signature_UMC_ebx 0x20434d55
-#define signature_UMC_edx 0x20434d55
-#define signature_UMC_ecx 0x20434d55
-/* VIA:     "VIA VIA VIA " */
-#define signature_VIA_ebx 0x20414956
-#define signature_VIA_edx 0x20414956
-#define signature_VIA_ecx 0x20414956
-/* VORTEX:  "Vortex86 SoC" */
-#define signature_VORTEX_ebx 0x74726f56
-#define signature_VORTEX_edx 0x36387865
-#define signature_VORTEX_ecx 0x436f5320
-
-/* Features in %ecx for leaf 1 */
-#define bit_SSE3        0x00000001
-#define bit_PCLMULQDQ   0x00000002
-#define bit_PCLMUL      bit_PCLMULQDQ   /* for gcc compat */
-#define bit_DTES64      0x00000004
-#define bit_MONITOR     0x00000008
-#define bit_DSCPL       0x00000010
-#define bit_VMX         0x00000020
-#define bit_SMX         0x00000040
-#define bit_EIST        0x00000080
-#define bit_TM2         0x00000100
-#define bit_SSSE3       0x00000200
-#define bit_CNXTID      0x00000400
-#define bit_FMA         0x00001000
-#define bit_CMPXCHG16B  0x00002000
-#define bit_xTPR        0x00004000
-#define bit_PDCM        0x00008000
-#define bit_PCID        0x00020000
-#define bit_DCA         0x00040000
-#define bit_SSE41       0x00080000
-#define bit_SSE4_1      bit_SSE41       /* for gcc compat */
-#define bit_SSE42       0x00100000
-#define bit_SSE4_2      bit_SSE42       /* for gcc compat */
-#define bit_x2APIC      0x00200000
-#define bit_MOVBE       0x00400000
-#define bit_POPCNT      0x00800000
-#define bit_TSCDeadline 0x01000000
-#define bit_AESNI       0x02000000
-#define bit_AES         bit_AESNI       /* for gcc compat */
-#define bit_XSAVE       0x04000000
-#define bit_OSXSAVE     0x08000000
-#define bit_AVX         0x10000000
-#define bit_F16C        0x20000000
-#define bit_RDRND       0x40000000
-
-/* Features in %edx for leaf 1 */
-#define bit_FPU         0x00000001
-#define bit_VME         0x00000002
-#define bit_DE          0x00000004
-#define bit_PSE         0x00000008
-#define bit_TSC         0x00000010
-#define bit_MSR         0x00000020
-#define bit_PAE         0x00000040
-#define bit_MCE         0x00000080
-#define bit_CX8         0x00000100
-#define bit_CMPXCHG8B   bit_CX8         /* for gcc compat */
-#define bit_APIC        0x00000200
-#define bit_SEP         0x00000800
-#define bit_MTRR        0x00001000
-#define bit_PGE         0x00002000
-#define bit_MCA         0x00004000
-#define bit_CMOV        0x00008000
-#define bit_PAT         0x00010000
-#define bit_PSE36       0x00020000
-#define bit_PSN         0x00040000
-#define bit_CLFSH       0x00080000
-#define bit_DS          0x00200000
-#define bit_ACPI        0x00400000
-#define bit_MMX         0x00800000
-#define bit_FXSR        0x01000000
-#define bit_FXSAVE      bit_FXSR        /* for gcc compat */
-#define bit_SSE         0x02000000
-#define bit_SSE2        0x04000000
-#define bit_SS          0x08000000
-#define bit_HTT         0x10000000
-#define bit_TM          0x20000000
-#define bit_PBE         0x80000000
-
-/* Features in %ebx for leaf 7 sub-leaf 0 */
-#define bit_FSGSBASE    0x00000001
-#define bit_SGX         0x00000004
-#define bit_BMI         0x00000008
-#define bit_HLE         0x00000010
-#define bit_AVX2        0x00000020
-#define bit_SMEP        0x00000080
-#define bit_BMI2        0x00000100
-#define bit_ENH_MOVSB   0x00000200
-#define bit_INVPCID     0x00000400
-#define bit_RTM         0x00000800
-#define bit_MPX         0x00004000
-#define bit_AVX512F     0x00010000
-#define bit_AVX512DQ    0x00020000
-#define bit_RDSEED      0x00040000
-#define bit_ADX         0x00080000
-#define bit_AVX512IFMA  0x00200000
-#define bit_CLFLUSHOPT  0x00800000
-#define bit_CLWB        0x01000000
-#define bit_AVX512PF    0x04000000
-#define bit_AVX512ER    0x08000000
-#define bit_AVX512CD    0x10000000
-#define bit_SHA         0x20000000
-#define bit_AVX512BW    0x40000000
-#define bit_AVX512VL    0x80000000
-
-/* Features in %ecx for leaf 7 sub-leaf 0 */
-#define bit_PREFTCHWT1       0x00000001
-#define bit_AVX512VBMI       0x00000002
-#define bit_PKU              0x00000004
-#define bit_OSPKE            0x00000010
-#define bit_WAITPKG          0x00000020
-#define bit_AVX512VBMI2      0x00000040
-#define bit_SHSTK            0x00000080
-#define bit_GFNI             0x00000100
-#define bit_VAES             0x00000200
-#define bit_VPCLMULQDQ       0x00000400
-#define bit_AVX512VNNI       0x00000800
-#define bit_AVX512BITALG     0x00001000
-#define bit_AVX512VPOPCNTDQ  0x00004000
-#define bit_RDPID            0x00400000
-#define bit_CLDEMOTE         0x02000000
-#define bit_MOVDIRI          0x08000000
-#define bit_MOVDIR64B        0x10000000
-#define bit_ENQCMD           0x20000000
-
-/* Features in %edx for leaf 7 sub-leaf 0 */
-#define bit_AVX5124VNNIW  0x00000004
-#define bit_AVX5124FMAPS  0x00000008
-#define bit_UINTR         0x00000020
-#define bit_SERIALIZE     0x00004000
-#define bit_TSXLDTRK      0x00010000
-#define bit_PCONFIG       0x00040000
-#define bit_IBT           0x00100000
-#define bit_AMXBF16       0x00400000
-#define bit_AVX512FP16    0x00800000
-#define bit_AMXTILE       0x01000000
-#define bit_AMXINT8       0x02000000
-
-/* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI       0x00000008
-#define bit_AVX512BF16    0x00000020
-#define bit_HRESET        0x00400000
-
-/* Features in %eax for leaf 13 sub-leaf 1 */
-#define bit_XSAVEOPT    0x00000001
-#define bit_XSAVEC      0x00000002
-#define bit_XSAVES      0x00000008
-
-/* Features in %eax for leaf 0x14 sub-leaf 0 */
-#define bit_PTWRITE     0x00000010
-
-/* Features in %ecx for leaf 0x80000001 */
-#define bit_LAHF_LM     0x00000001
-#define bit_ABM         0x00000020
-#define bit_LZCNT       bit_ABM        /* for gcc compat */
-#define bit_SSE4a       0x00000040
-#define bit_PRFCHW      0x00000100
-#define bit_XOP         0x00000800
-#define bit_LWP         0x00008000
-#define bit_FMA4        0x00010000
-#define bit_TBM         0x00200000
-#define bit_MWAITX      0x20000000
-
-/* Features in %edx for leaf 0x80000001 */
-#define bit_MMXEXT      0x00400000
-#define bit_LM          0x20000000
-#define bit_3DNOWP      0x40000000
-#define bit_3DNOW       0x80000000
-
-/* Features in %ebx for leaf 0x80000008 */
-#define bit_CLZERO      0x00000001
-#define bit_WBNOINVD    0x00000200
-
-
-#if __i386__
-#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
-    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
-                  : "0"(__leaf))
-
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
-    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
-                  : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
-    __asm("  xchgq  %%rbx,%q1\n" \
-          "  cpuid\n" \
-          "  xchgq  %%rbx,%q1" \
-        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
-        : "0"(__leaf))
-
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
-    __asm("  xchgq  %%rbx,%q1\n" \
-          "  cpuid\n" \
-          "  xchgq  %%rbx,%q1" \
-        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
-        : "0"(__leaf), "2"(__count))
-#endif
-
-static __inline int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig)
-{
-    unsigned int __eax, __ebx, __ecx, __edx;
-#if __i386__
-    int __cpuid_supported;
-
-    __asm("  pushfl\n"
-          "  popl   %%eax\n"
-          "  movl   %%eax,%%ecx\n"
-          "  xorl   $0x00200000,%%eax\n"
-          "  pushl  %%eax\n"
-          "  popfl\n"
-          "  pushfl\n"
-          "  popl   %%eax\n"
-          "  movl   $0,%0\n"
-          "  cmpl   %%eax,%%ecx\n"
-          "  je     1f\n"
-          "  movl   $1,%0\n"
-          "1:"
-        : "=r" (__cpuid_supported) : : "eax", "ecx");
-    if (!__cpuid_supported)
-        return 0;
-#endif
-
-    __cpuid(__leaf, __eax, __ebx, __ecx, __edx);
-    if (__sig)
-        *__sig = __ebx;
-    return __eax;
-}
-
-static __inline int __get_cpuid (unsigned int __leaf, unsigned int *__eax,
-                                 unsigned int *__ebx, unsigned int *__ecx,
-                                 unsigned int *__edx)
-{
-    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
-
-    if (__max_leaf == 0 || __max_leaf < __leaf)
-        return 0;
-
-    __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);
-    return 1;
-}
-
-static __inline int __get_cpuid_count (unsigned int __leaf,
-                                       unsigned int __subleaf,
-                                       unsigned int *__eax, unsigned int *__ebx,
-                                       unsigned int *__ecx, unsigned int *__edx)
-{
-    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
-
-    if (__max_leaf == 0 || __max_leaf < __leaf)
-        return 0;
-
-    __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
-    return 1;
-}
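Typical usage of the helpers above: ask for leaf 7 sub-leaf 0 and test one of the bit_* masks. For example:

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned int eax, ebx, ecx, edx;
  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    printf("AVX2: %s\n", (ebx & bit_AVX2) ? "yes" : "no");
  else
    printf("leaf 7 not supported\n");
  return 0;
}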
-
-#endif /* __CPUID_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/emmintrin.h b/linux-x86/lib64/clang/14.0.2/include/emmintrin.h
deleted file mode 100644
index 6e9c303..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ /dev/null
@@ -1,4985 +0,0 @@
-/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __EMMINTRIN_H
-#define __EMMINTRIN_H
-
-#if !defined(__i386__) && !defined(__x86_64__)
-#error "This header is only meant to be used on x86 and x64 architecture"
-#endif
-
-#include <xmmintrin.h>
-
-typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
-typedef short __v8hi __attribute__((__vector_size__(16)));
-typedef char __v16qi __attribute__((__vector_size__(16)));
-
-/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
-typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
-
-/* We need an explicitly signed variant for char. Note that this shouldn't
- * appear in the interface though. */
-typedef signed char __v16qs __attribute__((__vector_size__(16)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
-
-/// Adds lower double-precision values in both operands and returns the
-///    sum in the lower 64 bits of the result. The upper 64 bits of the result
-///    are copied from the upper double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSD / ADDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
-///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPD / ADDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the sums of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a + (__v2df)__b);
-}
-
-/// Subtracts the lower double-precision value of the second operand
-///    from the lower double-precision value of the first operand and returns
-///    the difference in the lower 64 bits of the result. The upper 64 bits of
-///    the result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSD / SUBSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    difference of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPD / SUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] containing the differences between
-///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a - (__v2df)__b);
-}
-
-/// Multiplies lower double-precision values in both operands and returns
-///    the product in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSD / MULSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    product of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPD / MULPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the products of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a * (__v2df)__b);
-}
-
-/// Divides the lower double-precision value of the first operand by the
-///    lower double-precision value of the second operand and returns the
-///    quotient in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSD / DIVSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    quotient of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Performs an element-by-element division of two 128-bit vectors of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPD / DIVPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] containing the quotients of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a / (__v2df)__b);
-}
-
-/// Calculates the square root of the lower double-precision value of
-///    the second operand and returns it in the lower 64 bits of the result.
-///    The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSD / SQRTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    upper 64 bits of this operand are copied to the upper 64 bits of the
-///    result.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    square root is calculated using the lower 64 bits of this operand.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    square root of the lower 64 bits of operand \a __b, and whose upper 64
-///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
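-
-// Worth noting (added annotation): unlike the two-operand arithmetic _sd
-// intrinsics above, _mm_sqrt_sd takes its computed lane from __b and its
-// pass-through lane from __a. A small sketch, assuming SSE2:
-//
-//   __m128d a = _mm_set_pd(7.0, 1.0);  // a = {1.0, 7.0} (low, high)
-//   __m128d b = _mm_set_pd(0.0, 9.0);  // b = {9.0, 0.0}
-//   __m128d r = _mm_sqrt_sd(a, b);     // r = {3.0, 7.0}: sqrt of b's low
-//                                      // lane, upper lane copied from a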
-
-/// Calculates the square root of each of the two values stored in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPD / SQRTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [2 x double] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
-  return __builtin_ia32_sqrtpd((__v2df)__a);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the lesser of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    minimum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the lesser of each pair of
-///    values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the minimum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the greater of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    maximum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the greater of each pair
-///    of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the maximum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
-}
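-
-// A behavioral note (added annotation, hedged): on x86 hardware, MINPD and
-// MAXPD are not symmetric. When either input in a lane is NaN, the lane of
-// the second operand is returned, so argument order matters:
-//
-//   __m128d x   = _mm_set1_pd(5.0);
-//   __m128d nan = _mm_set1_pd(__builtin_nan(""));
-//   _mm_min_pd(nan, x);  // lanes hold 5.0 (second operand)
-//   _mm_min_pd(x, nan);  // lanes hold NaN (second operand)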
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the right source operand.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values in the second operand and the one's complement of the first
-///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)(~(__v2du)__a & (__v2du)__b);
-}
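-
-// A common idiom (illustrative, not from the original header): because
-// _mm_andnot_pd complements its first operand, it can clear the sign bits
-// of a vector in one step, giving a branch-free fabs():
-//
-//   __m128d sign = _mm_set1_pd(-0.0);       // only the sign bits set
-//   __m128d v    = _mm_set_pd(-2.5, 3.0);   // v = {3.0, -2.5}
-//   __m128d av   = _mm_andnot_pd(sign, v);  // av = {3.0, 2.5}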
-
-/// Performs a bitwise OR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a ^ (__v2du)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
-///    for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPD / CMPEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than those in the second operand. Each comparison
-///    yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
-}
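-
-// The all-ones/all-zeros lane masks these comparisons produce combine with
-// the bitwise intrinsics above into a branch-free select (a sketch, assuming
-// SSE2 only; with SSE4.1 a blend instruction would replace the last step):
-//
-//   /* given __m128d a, b, x, y */
-//   __m128d mask = _mm_cmplt_pd(a, b);             // per lane: a < b ? ~0 : 0
-//   __m128d r    = _mm_or_pd(_mm_and_pd(mask, x),  // keep x where a < b
-//                            _mm_andnot_pd(mask, y));  // keep y elsewhere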
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "ordered" with respect to each
-///    other if neither value is a NaN. Each comparison yields 0x0 for false,
-///    0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPD / CMPORDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "unordered" with respect to each
-///    other if one or both values are NaN. Each comparison yields 0x0 for
-///    false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPD / CMPUNORDPD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
-}
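-
-// A handy consequence (added annotation): comparing a vector against itself
-// with _mm_cmpunord_pd flags exactly the NaN lanes, since a value is
-// unordered with itself only when it is NaN:
-//
-//   __m128d v     = _mm_set_pd(__builtin_nan(""), 1.0);  // v = {1.0, NaN}
-//   __m128d isnan = _mm_cmpunord_pd(v, v);  // {0x0, 0xFFFFFFFFFFFFFFFF}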
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unequal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPD / CMPNEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSD / CMPEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
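-
-// Implementation note (added annotation): there is no CMPGTSD encoding, so
-// this intrinsic swaps the operands of CMPLTSD. The extra recombination step
-// preserves the documented result layout, since the raw swapped compare
-// would otherwise carry __b's upper lane:
-//
-//   __m128d a = _mm_set_pd(8.0, 2.0);  // a = {2.0, 8.0}
-//   __m128d b = _mm_set_pd(9.0, 1.0);  // b = {1.0, 9.0}
-//   __m128d r = _mm_cmpgt_sd(a, b);    // r = {~0 (2.0 > 1.0), 8.0 from a}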
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "ordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "ordered" with respect to each other if
-///    neither value is a NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSD / CMPORDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "unordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "unordered" with respect to each other if
-///    one or both values are NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSD / CMPUNORDSD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSD / CMPNEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
-}
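-
-// Usage sketch (added annotation): the comi/ucomi family returns a plain
-// int rather than a lane mask, so it feeds directly into scalar control flow:
-//
-//   __m128d a = _mm_set_sd(1.0);  // low lane 1.0, high lane zeroed
-//   __m128d b = _mm_set_sd(1.0);
-//   if (_mm_comieq_sd(a, b)) {
-//     /* taken: the low lanes compare equal */
-//   }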
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality. The
-///    comparison yields 0 for false, 1 for true.
-///
-///    If either of the two lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
-}
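-
-// A contrast worth noting (added annotation): NaN makes the "not equal"
-// predicates return 1, while every other predicate in this family returns 0,
-// because NaN always compares unordered:
-//
-//   __m128d qnan = _mm_set_sd(__builtin_nan(""));
-//   _mm_ucomieq_sd(qnan, qnan);   // 0: unordered counts as "not equal"
-//   _mm_ucomineq_sd(qnan, qnan);  // 1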
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two single-precision floating-point
-///    values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
-///    The upper 64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2PS / CVTPD2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2ps((__v2df)__a);
-}
-
-/// Converts the lower two single-precision floating-point elements of a
-///    128-bit vector of [4 x float] into two double-precision floating-point
-///    values, returned in a 128-bit vector of [2 x double]. The upper two
-///    elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PD / CVTPS2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower two single-precision
-///    floating-point elements are converted to double-precision values. The
-///    upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
-}
-
-/// Converts the lower two integer elements of a 128-bit vector of
-///    [4 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-///    The upper two elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PD / CVTDQ2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. The lower two integer elements
-///    are converted to double-precision values. The upper two elements are
-///    unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a 128-bit vector of [2 x double]
-///    into a 32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si((__v2df)__a);
-}
-
-/// Converts the lower double-precision floating-point element of a
-///    128-bit vector of [2 x double], in the second parameter, into a
-///    single-precision floating-point value, returned in the lower 32 bits of a
-///    128-bit vector of [4 x float]. The upper 96 bits of the result vector are
-///    copied from the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SS / CVTSD2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are
-///    copied to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
-///    converted value from the second parameter. The upper 96 bits are copied
-///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
-  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
-}
-
-/// Converts a 32-bit signed integer value, in the second parameter, into
-///    a double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 32-bit signed integer containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the lower single-precision floating-point element of a
-///    128-bit vector of [4 x float], in the second parameter, into a
-///    double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SD / CVTSS2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower single-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPD2DQ / CVTTPD2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
-  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a [2 x double] vector into a 32-bit
-///    signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si((__v2df)__a);
-}
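-
-// Rounding contrast (added annotation; assumes the default MXCSR setting of
-// round-to-nearest-even): _mm_cvtsd_si32 honors the current rounding mode,
-// while the "t" variant always truncates toward zero:
-//
-//   __m128d v = _mm_set_sd(2.7);
-//   _mm_cvtsd_si32(v);   // 3 (rounded to nearest)
-//   _mm_cvttsd_si32(v);  // 2 (truncated)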
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
-}
-
-/// Converts the two signed 32-bit integer elements of a 64-bit vector of
-///    [2 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PD </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32].
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
-  return __builtin_ia32_cvtpi2pd((__v2si)__a);
-}
-
-/// Returns the low-order element of a 128-bit vector of [2 x double] as
-///    a double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
-/// \returns A double-precision floating-point value copied from the lower 64
-///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
-  return __a[0];
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(const __m128d*)__dp;
-}
-
-/// Loads a double-precision floating-point value from a specified memory
-///    location and duplicates it to both vector elements of a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVDDUP </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-/// \returns A 128-bit vector of [2 x double] containing the loaded and
-///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
-  struct __mm_load1_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
-}
-
-#define        _mm_load_pd1(dp)        _mm_load1_pd(dp)
-
-/// Loads two double-precision values, in reverse order, from an aligned
-///    memory location into a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction +
-/// needed shuffling instructions. In AVX mode, the shuffling may be combined
-/// with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction.
-///
-/// \param __dp
-///    A 16-byte aligned pointer to an array of double-precision values to be
-///    loaded in reverse order.
-/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
-///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(const __m128d*)__dp;
-  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
-  struct __loadu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__dp)->__v;
-}
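-
-/* Usage sketch (illustrative only; the helper name is hypothetical):
-   _mm_load_pd requires a 16-byte aligned pointer, while _mm_loadu_pd
-   accepts any address, possibly at a small performance cost. */
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-__example_load_any(double const *__p)
-{
-  /* If __p were known to be 16-byte aligned, _mm_load_pd(__p) could be
-     used instead. */
-  return _mm_loadu_pd(__p);
-}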
-
-/// Loads a 64-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
-  struct __loadu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  long long __u = ((const struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
-}
-
-/// Loads a 32-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
-  struct __loadu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  int __u = ((const struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
-}
-
-/// Loads a 16-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __a
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
-  struct __loadu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  short __u = ((const struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
-}
-
-/// Loads a 64-bit double-precision value to the low element of a
-///    128-bit vector of [2 x double] and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-///    The address of the memory location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
-  struct __mm_load_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
-}
-
-/// Loads a double-precision value into the high-order bits of a 128-bit
-///    vector of [2 x double]. The low-order bits are copied from the low-order
-///    bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [127:64] of the result. The address of the memory location does not have
-///    to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
-}
-
-/// Loads a double-precision value into the low-order bits of a 128-bit
-///    vector of [2 x double]. The high-order bits are copied from the
-///    high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [63:0] of the result. The address of the memory location does not have to
-///    be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadl_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double] with
-///    unspecified content. This could be used as an argument to another
-///    intrinsic function where the argument is required but the value is not
-///    actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
-///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
-  return (__m128d)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits of the vector are initialized with the specified double-precision
-///    floating-point value. The upper 64 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
-///    lower 64 bits contain the value of the parameter. The upper 64 bits are
-///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
-  return _mm_set1_pd(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized with the specified double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double],
-///    initialized in reverse order with the specified double-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
-}
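-
-/* Usage sketch (illustrative only; the helper name is hypothetical):
-   _mm_set_pd takes elements high-to-low and _mm_setr_pd low-to-high, so
-   the two calls below construct the same vector. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_set_order(void)
-{
-  __m128d __hi_first = _mm_set_pd(2.0, 1.0);  /* element 0 = 1.0, element 1 = 2.0 */
-  __m128d __lo_first = _mm_setr_pd(1.0, 2.0); /* element 0 = 1.0, element 1 = 2.0 */
-  (void)__hi_first; (void)__lo_first;
-}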
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [2 x double] with
-///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits are set to the lower 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits are written to the
-///    upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
-///    lower 64 bits of the result.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
-  struct __mm_store_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
-}
-
-/// Moves packed double-precision values from a 128-bit vector of
-///    [2 x double] to a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to an aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A packed 128-bit vector of [2 x double] containing the values to be
-///    moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
-}
-
-/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-  _mm_store_pd(__dp, __a);
-}
-
-/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
-  _mm_store1_pd(__dp, __a);
-}
-
-/// Stores a 128-bit vector of [2 x double] into an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
-  struct __storeu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
-}
-
-/// Stores two double-precision values, in reverse order, from a 128-bit
-///    vector of [2 x double] to a 16-byte aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to a shuffling instruction followed by a
-/// <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be reversed and
-///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
-  *(__m128d *)__dp = __a;
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
-}
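-
-/* Usage sketch (illustrative only; the helper name is hypothetical):
-   splitting a vector into two scalars by storing its halves separately. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_split_store(double *__lo, double *__hi, __m128d __v)
-{
-  _mm_storel_pd(__lo, __v); /* writes element 0 (bits [63:0]) */
-  _mm_storeh_pd(__hi, __v); /* writes element 1 (bits [127:64]) */
-}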
-
-/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
-///    saving the lower 8 bits of each sum in the corresponding element of a
-///    128-bit result vector of [16 x i8].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDB / PADDB </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a + (__v16qu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],
-///    saving the lower 16 bits of each sum in the corresponding element of a
-///    128-bit result vector of [8 x i16].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDW / PADDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-/// \returns A 128-bit vector of [8 x i16] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a + (__v8hu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],
-///    saving the lower 32 bits of each sum in the corresponding element of a
-///    128-bit result vector of [4 x i32].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDD / PADDD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32].
-/// \param __b
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a + (__v4su)__b);
-}
-
-/// Adds two signed or unsigned 64-bit integer values, returning the
-///    lower 64 bits of the sum.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PADDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer.
-/// \param __b
-///    A 64-bit integer.
-/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
-///    saving the lower 64 bits of each sum in the corresponding element of a
-///    128-bit result vector of [2 x i64].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDQ / PADDQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64].
-/// \param __b
-///    A 128-bit vector of [2 x i64].
-/// \returns A 128-bit vector of [2 x i64] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a + (__v2du)__b);
-}
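-
-/* Usage sketch (illustrative only; the helper name is hypothetical): the
-   plain adds above wrap modulo the element width rather than saturating. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_wrapping_add(void)
-{
-  __m128i __a = __extension__ (__m128i)(__v4si){ 0x7FFFFFFF, 0, 0, 0 };
-  __m128i __b = __extension__ (__m128i)(__v4si){ 1, 0, 0, 0 };
-  return _mm_add_epi32(__a, __b); /* element 0 wraps to 0x80000000 */
-}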
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [16 x i8] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
-///    saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSB / PADDSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [16 x i8] vector.
-/// \param __b
-///    A 128-bit signed [16 x i8] vector.
-/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [8 x i16] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-///    are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-///    0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSW / PADDSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [16 x i8] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF
-///    are saturated to 0xFF. Negative sums are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSB / PADDUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [8 x i16] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [8 x i16]. Positive sums greater than
-///    0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSW / PADDUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
-}
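-
-/* Usage sketch (illustrative only; the helper name is hypothetical):
-   contrasting wrapping and saturating signed 16-bit addition. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_saturating_add(void)
-{
-  __m128i __a = __extension__ (__m128i)(__v8hi){ 0x7000, 0, 0, 0, 0, 0, 0, 0 };
-  __m128i __b = __extension__ (__m128i)(__v8hi){ 0x7000, 0, 0, 0, 0, 0, 0, 0 };
-  __m128i __wrap = _mm_add_epi16(__a, __b); /* element 0 wraps to 0xE000 */
-  __m128i __sat = _mm_adds_epi16(__a, __b); /* element 0 saturates to 0x7FFF */
-  (void)__wrap; (void)__sat;
-}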
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [16 x i8] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGB / PAVGB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [8 x i16] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGW / PAVGW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, producing eight intermediate 32-bit signed integer products, and
-///    adds the consecutive pairs of 32-bit products to form a 128-bit signed
-///    [4 x i32] vector.
-///
-///    For example, bits [15:0] of both parameters are multiplied producing a
-///    32-bit product, bits [31:16] of both parameters are multiplied producing
-///    a 32-bit product, and the sum of those two products becomes bits [31:0]
-///    of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMADDWD / PMADDWD </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
-}
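-
-/* Usage sketch (illustrative only; the helper name is hypothetical): each
-   32-bit element of the result is a two-element dot product, e.g. element 0
-   below is 1*1 + 2*1 = 3. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_dot_pairs(void)
-{
-  __m128i __a = __extension__ (__m128i)(__v8hi){ 1, 2, 3, 4, 5, 6, 7, 8 };
-  __m128i __b = __extension__ (__m128i)(__v8hi){ 1, 1, 1, 1, 1, 1, 1, 1 };
-  return _mm_madd_epi16(__a, __b); /* {3, 7, 11, 15} as [4 x i32] */
-}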
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSW / PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUB / PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSW / PMINSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUB / PMINUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHW / PMULHW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two unsigned [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit unsigned [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHUW / PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
-///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the lower 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULLW / PMULLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a * (__v8hu)__b);
-}
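-
-/* Usage sketch (illustrative only; the helper name is hypothetical):
-   together, _mm_mulhi_epi16 and _mm_mullo_epi16 recover the full 32-bit
-   product of each pair of 16-bit elements. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_mul_halves(void)
-{
-  __m128i __a = __extension__ (__m128i)(__v8hi){ 300, 0, 0, 0, 0, 0, 0, 0 };
-  __m128i __b = __extension__ (__m128i)(__v8hi){ 400, 0, 0, 0, 0, 0, 0, 0 };
-  /* 300 * 400 = 120000 = 0x0001D4C0 */
-  __m128i __hi = _mm_mulhi_epi16(__a, __b); /* element 0 = 0x0001 */
-  __m128i __lo = _mm_mullo_epi16(__a, __b); /* element 0 = 0xD4C0 */
-  (void)__hi; (void)__lo;
-}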
-
-/// Multiplies 32-bit unsigned integer values contained in the lower bits
-///    of the two 64-bit integer vectors and returns the 64-bit unsigned
-///    product.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer containing one of the source operands.
-/// \param __b
-///    A 64-bit integer containing one of the source operands.
-/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
-  return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
-}
-
-/// Multiplies 32-bit unsigned integer values contained in the lower
-///    bits of the corresponding elements of two [2 x i64] vectors, and returns
-///    the 64-bit products in the corresponding elements of a [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULUDQ / PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A [2 x i64] vector containing one of the source operands.
-/// \param __b
-///    A [2 x i64] vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
-}
-
-/// Computes the absolute differences of corresponding 8-bit integer
-///    values in two 128-bit vectors. Sums the first 8 absolute differences, and
-///    separately sums the second 8 absolute differences. Packs these two
-///    unsigned 16-bit integer sums into the upper and lower elements of a
-///    [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSADBW / PSADBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the sums of the sets of absolute
-///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
-}
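-
-/* Usage sketch (illustrative only; the helper name is hypothetical): a
-   common use of _mm_sad_epu8 is summing absolute pixel differences over a
-   16-byte row; the two partial sums land in the low 16 bits of each 64-bit
-   element and are combined here for the row total. */
-static __inline__ long long __DEFAULT_FN_ATTRS
-__example_row_sad(__m128i __row_a, __m128i __row_b)
-{
-  __m128i __sad = _mm_sad_epu8(__row_a, __row_b);
-  return (long long)(((__v2du)__sad)[0] + ((__v2du)__sad)[1]);
-}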
-
-/// Subtracts the corresponding 8-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBB / PSUBB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a - (__v16qu)__b);
-}
-
-/// Subtracts the corresponding 16-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBW / PSUBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a - (__v8hu)__b);
-}
-
-/// Subtracts the corresponding 32-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBD / PSUBD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a - (__v4su)__b);
-}
-
-/// Subtracts signed or unsigned 64-bit integer values and writes the
-///    difference to the corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the minuend.
-/// \param __b
-///    A 64-bit integer vector containing the subtrahend.
-/// \returns A 64-bit integer vector containing the difference of the values in
-///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
-}
-
-/// Subtracts the corresponding elements of two [2 x i64] vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBQ / PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a - (__v2du)__b);
-}
-
-/// Subtracts corresponding 8-bit signed integer values in the input and
-///    returns the differences in the corresponding bytes in the destination.
-///    Differences greater than 0x7F are saturated to 0x7F, and differences less
-///    than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSB / PSUBSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit signed integer values in the input and
-///    returns the differences in the corresponding elements in the destination.
-///    Differences greater than 0x7FFF are saturated to 0x7FFF, and differences
-///    less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSW / PSUBSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-///    and returns the differences in the corresponding bytes in the
-///    destination. Differences less than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSB / PSUBUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-///    and returns the differences in the corresponding elements in the
-///    destination. Differences less than 0x0000 are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSW / PSUBUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise AND of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors, using the
-///    one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing the left source operand. The one's complement
-///    of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector containing the right source operand.
-/// \returns A 128-bit integer vector containing the bitwise AND of the one's
-///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)(~(__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise OR of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
-///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
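-
-/* Usage sketch (illustrative only; the helper name is hypothetical): the
-   classic SSE2 select idiom. With a mask of all-ones/all-zeros elements,
-   (mask AND a) OR (NOT mask AND b) picks elements from __a where the mask
-   is set and from __b elsewhere. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_select(__m128i __mask, __m128i __a, __m128i __b)
-{
-  return _mm_or_si128(_mm_and_si128(__mask, __a),
-                      _mm_andnot_si128(__mask, __b));
-}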
-
-/// Left-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_slli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSLLDQ / PSLLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to left-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-#define _mm_bslli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
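-
-/* Usage sketch (illustrative only; the helper name is hypothetical): the
-   shift count is in bytes, not bits, so a count of 4 moves data by one
-   32-bit element; counts greater than 15 produce a zero vector. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_byteshift(__m128i __v)
-{
-  return _mm_slli_si128(__v, 4); /* element i of the result = element i-1 of __v */
-}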
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
-}
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psllqi128((__v2di)__a, __count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_srli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSRLDQ / PSRLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to right-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-#define _mm_bsrli_si128(a, imm) \
-  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
-}
-
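-/* Editor's note: a user-side sketch (not part of the original header)
-   contrasting the logical shifts above with the arithmetic _mm_srai_* forms
-   defined earlier: srli zero-fills the vacated high bits, srai replicates
-   the sign bit. The helper name is illustrative only. */
-static __inline__ void __example_shift_kinds(void)
-{
-  __m128i __v = _mm_set1_epi32(-16);     /* 0xFFFFFFF0 in each lane */
-  __m128i __l = _mm_srli_epi32(__v, 4);  /* 0x0FFFFFFF: zero-filled */
-  __m128i __a = _mm_srai_epi32(__v, 4);  /* 0xFFFFFFFF: stays -1 */
-  (void)__l; (void)__a;
-}
-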
-/// Compares each of the corresponding 8-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
-///    for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQB / PCMPEQB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qi)__a == (__v16qi)__b);
-}
-
-/// Compares each of the corresponding 16-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQW / PCMPEQW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a == (__v8hi)__b);
-}
-
-/// Compares each of the corresponding 32-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQD / PCMPEQD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a == (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are
-///    greater than those in the second operand. Each comparison yields 0x0 for
-///    false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
-  /* This function always performs a signed comparison, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)((__v16qs)__a > (__v16qs)__b);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a > (__v8hi)__b);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a > (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are less
-///    than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi8(__b, __a);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi16(__b, __a);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi32(__b, __a);
-}
-
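-/* Editor's note: an illustrative sketch (not part of the original header).
-   The comparisons above return all-ones/all-zeros lane masks, which combine
-   with _mm_and_si128/_mm_andnot_si128/_mm_or_si128 (defined earlier) into a
-   branchless per-lane select, e.g. a signed 32-bit maximum: */
-static __inline__ __m128i __example_max_epi32(__m128i __a, __m128i __b)
-{
-  __m128i __m = _mm_cmpgt_epi32(__a, __b);          /* mask: a > b per lane */
-  return _mm_or_si128(_mm_and_si128(__m, __a),      /* keep a where mask set */
-                      _mm_andnot_si128(__m, __b));  /* keep b elsewhere */
-}
-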
-#ifdef __x86_64__
-/// Converts a 64-bit signed integer value from the second operand into a
-///    double-precision value and returns it in the lower element of a [2 x
-///    double] vector; the upper element of the returned vector is copied from
-///    the upper element of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
-///    copied to the upper 64 bits of the destination.
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, according to the current rounding mode.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si64((__v2df)__a);
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si64((__v2df)__a);
-}
-#endif
-
-/// Converts a vector of [4 x i32] into a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PS / CVTDQ2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit integer vector of [4 x i32] containing the converted
-///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32],
-///    truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPS2DQ / CVTTPS2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
-}
-
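-/* Editor's note: a user-side sketch (not part of the original header)
-   contrasting the two float-to-i32 conversions: _mm_cvtps_epi32 honors the
-   current rounding mode (round-to-nearest-even by default), while
-   _mm_cvttps_epi32 always truncates toward zero. */
-static __inline__ void __example_round_vs_truncate(void)
-{
-  __m128 __f  = _mm_set_ps(2.5f, 1.5f, -1.5f, 0.75f); /* elements 3..0 */
-  __m128i __r = _mm_cvtps_epi32(__f);   /* elements 0..3: {1, -2, 2, 2} */
-  __m128i __t = _mm_cvttps_epi32(__f);  /* elements 0..3: {0, -1, 1, 2} */
-  (void)__r; (void)__t;
-}
-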
-/// Returns a vector of [4 x i32] where the lowest element is the input
-///    operand and the remaining elements are zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A 32-bit signed integer operand.
-/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
-}
-
-#ifdef __x86_64__
-/// Returns a vector of [2 x i64] where the lower element is the input
-///    operand and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
-}
-#endif
-
-/// Moves the least significant 32 bits of a vector of [4 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A vector of [4 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
-  __v4si __b = (__v4si)__a;
-  return __b[0];
-}
-
-#ifdef __x86_64__
-/// Moves the least significant 64 bits of a vector of [2 x i64] to a
-///    64-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A vector of [2 x i64]. The least significant 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
-  return __a[0];
-}
-#endif
-
-/// Moves packed integer values from an aligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA / MOVDQA </c> instruction.
-///
-/// \param __p
-///    An aligned pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
-  return *__p;
-}
-
-/// Moves packed integer values from an unaligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU / MOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
-  struct __loadu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si128*)__p)->__v;
-}
-
-/// Returns a vector of [2 x i64] where the lower element is taken from
-///    the lower element of the operand, and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The 64-bit value loaded from
-///    this location is written to bits [63:0] of the destination.
-/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
-///    moved value. The higher order bits are cleared.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
-  struct __mm_loadl_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
-}
-
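-/* Editor's note: an illustrative sketch (not part of the original header).
-   _mm_load_si128 requires a 16-byte-aligned address and may fault otherwise;
-   _mm_loadu_si128 accepts any address, so it is the safe choice for reading
-   from arbitrary byte buffers. */
-static __inline__ __m128i __example_load_bytes(const unsigned char *__buf)
-{
-  return _mm_loadu_si128((const __m128i_u *)__buf); /* alignment unknown */
-}
-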
-/// Generates a 128-bit vector of [4 x i32] with unspecified content.
-///    This could be used as an argument to another intrinsic function where the
-///    argument is required but the value is not actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
-  return (__m128i)__builtin_ia32_undef128();
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
-  return _mm_set_epi64x((long long)__q1, (long long)__q0);
-}
-
-/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
-///    the specified 32-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i3
-///    A 32-bit integer value used to initialize bits [127:96] of the
-///    destination vector.
-/// \param __i2
-///    A 32-bit integer value used to initialize bits [95:64] of the destination
-///    vector.
-/// \param __i1
-///    A 32-bit integer value used to initialize bits [63:32] of the destination
-///    vector.
-/// \param __i0
-///    A 32-bit integer value used to initialize bits [31:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
-}
-
-/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
-///    the specified 16-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w7
-///    A 16-bit integer value used to initialize bits [127:112] of the
-///    destination vector.
-/// \param __w6
-///    A 16-bit integer value used to initialize bits [111:96] of the
-///    destination vector.
-/// \param __w5
-///    A 16-bit integer value used to initialize bits [95:80] of the destination
-///    vector.
-/// \param __w4
-///    A 16-bit integer value used to initialize bits [79:64] of the destination
-///    vector.
-/// \param __w3
-///    A 16-bit integer value used to initialize bits [63:48] of the destination
-///    vector.
-/// \param __w2
-///    A 16-bit integer value used to initialize bits [47:32] of the destination
-///    vector.
-/// \param __w1
-///    A 16-bit integer value used to initialize bits [31:16] of the destination
-///    vector.
-/// \param __w0
-///    A 16-bit integer value used to initialize bits [15:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
-}
-
-/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
-///    the specified 8-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b15
-///    Initializes bits [127:120] of the destination vector.
-/// \param __b14
-///    Initializes bits [119:112] of the destination vector.
-/// \param __b13
-///    Initializes bits [111:104] of the destination vector.
-/// \param __b12
-///    Initializes bits [103:96] of the destination vector.
-/// \param __b11
-///    Initializes bits [95:88] of the destination vector.
-/// \param __b10
-///    Initializes bits [87:80] of the destination vector.
-/// \param __b9
-///    Initializes bits [79:72] of the destination vector.
-/// \param __b8
-///    Initializes bits [71:64] of the destination vector.
-/// \param __b7
-///    Initializes bits [63:56] of the destination vector.
-/// \param __b6
-///    Initializes bits [55:48] of the destination vector.
-/// \param __b5
-///    Initializes bits [47:40] of the destination vector.
-/// \param __b4
-///    Initializes bits [39:32] of the destination vector.
-/// \param __b3
-///    Initializes bits [31:24] of the destination vector.
-/// \param __b2
-///    Initializes bits [23:16] of the destination vector.
-/// \param __b1
-///    Initializes bits [15:8] of the destination vector.
-/// \param __b0
-///    Initializes bits [7:0] of the destination vector.
-/// \returns An initialized 128-bit vector of [16 x i8] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
-}
-
-/// Initializes both values in a 128-bit integer vector with the
-///    specified 64-bit integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    Integer value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit integer vector of [2 x i64] with both
-///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
-  return _mm_set_epi64x(__q, __q);
-}
-
-/// Initializes both values in a 128-bit vector of [2 x i64] with the
-///    specified 64-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    A 64-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [2 x i64] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
-  return _mm_set_epi64(__q, __q);
-}
-
-/// Initializes all values in a 128-bit vector of [4 x i32] with the
-///    specified 32-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i
-///    A 32-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
-  return _mm_set_epi32(__i, __i, __i, __i);
-}
-
-/// Initializes all values in a 128-bit vector of [8 x i16] with the
-///    specified 16-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w
-///    A 16-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
-  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Initializes all values in a 128-bit vector of [16 x i8] with the
-///    specified 8-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b
-///    An 8-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [16 x i8] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __q0
-///    A 64-bit integral value used to initialize the lower 64 bits of the
-///    result.
-/// \param __q1
-///    A 64-bit integral value used to initialize the upper 64 bits of the
-///    result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64(__q1, __q0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
-  return _mm_set_epi32(__i3, __i2, __i1, __i0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w0
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \param __w1
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w2
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w3
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w4
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w5
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w6
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w7
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
-  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b0
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \param __b1
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b2
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b3
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b4
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b5
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b6
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b7
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b8
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b9
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
-}
-
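-/* Editor's note: a short sketch (not part of the original header) on argument
-   order: the _mm_set_* initializers list elements from highest to lowest,
-   while the _mm_setr_* forms list them from lowest to highest, so the two
-   calls below produce identical vectors. */
-static __inline__ void __example_set_order(void)
-{
-  __m128i __a = _mm_set_epi32(3, 2, 1, 0);  /* element 0 = 0 ... element 3 = 3 */
-  __m128i __b = _mm_setr_epi32(0, 1, 2, 3); /* same layout */
-  (void)__a; (void)__b;
-}
-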
-/// Creates a 128-bit integer vector initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit integer vector with all elements set to
-///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
-}
-
-/// Stores a 128-bit integer vector to a memory location aligned on a
-///    128-bit boundary.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location that will receive the integer
-///    values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
-  *__p = __b;
-}
-
-/// Stores a 128-bit integer vector to an unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the integer values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
-  struct __storeu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
-}
-
-/// Stores a 64-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
-  struct __storeu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
-}
-
-/// Stores a 32-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
-  struct __storeu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
-}
-
-/// Stores a 16-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __p
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
-  struct __storeu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
-}
-
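-/* Editor's note: a user-side sketch (not part of the original header). The
-   _mm_storeu_si16/32/64 helpers spill only the low element of the vector,
-   which is useful for writing a buffer tail without storing past its end. */
-static __inline__ void __example_store_tail(void *__dst, __m128i __v)
-{
-  _mm_storeu_si32(__dst, __v); /* writes exactly bits [31:0] of __v */
-}
-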
-/// Moves bytes selected by the mask from the first operand to the
-///    specified unaligned memory location. When a mask bit is 1, the
-///    corresponding byte is written, otherwise it is not written.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon). Exception and trap behavior for elements not selected
-///    for storage to memory is implementation dependent.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVDQU / MASKMOVDQU </c>
-///   instruction.
-///
-/// \param __d
-///    A 128-bit integer vector containing the values to be moved.
-/// \param __n
-///    A 128-bit integer vector containing the mask. The most significant bit
-///    of each byte is the mask bit for the corresponding byte of \a __d.
-/// \param __p
-///    A pointer to an unaligned 128-bit memory location where the specified
-///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
-  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
-}
-
-/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
-///    a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the lower 64 bits
-///    of the integer vector parameter.
-/// \param __a
-///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
-///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
-  struct __mm_storel_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
-///    aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPD / MOVNTPD </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
-}
-
-/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQ / MOVNTDQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
-}
-
-/// Stores a 32-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTI </c> instruction.
-///
-/// \param __p
-///    A pointer to the 32-bit memory location used to store the value.
-/// \param __a
-///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
-  __builtin_ia32_movnti(__p, __a);
-}
-
-#ifdef __x86_64__
-/// Stores a 64-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTIQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 64-bit memory location used to store the value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
-  __builtin_ia32_movnti64(__p, __a);
-}
-#endif
-
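-/* Editor's note: a usage sketch (not part of the original header). The
-   non-temporal stores above are weakly ordered; a streaming loop should end
-   with _mm_sfence() (declared in <xmmintrin.h>) before the data is read by
-   another agent. Assumes __dst is 16-byte aligned; any trailing __n % 4
-   elements are left to the caller. */
-static __inline__ void __example_stream_fill(int *__dst, int __n, int __v)
-{
-  __m128i __x = _mm_set1_epi32(__v);
-  int __i;
-  for (__i = 0; __i + 4 <= __n; __i += 4)
-    _mm_stream_si128((__m128i *)(__dst + __i), __x); /* bypasses the cache */
-  _mm_sfence(); /* order the streamed stores before later operations */
-}
-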
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// The cache line containing \a __p is flushed and invalidated from all
-///    caches in the coherency domain.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CLFLUSH </c> instruction.
-///
-/// \param __p
-///    A pointer to the memory location used to identify the cache line to be
-///    flushed.
-void _mm_clflush(void const * __p);
-
-/// Forces strong memory ordering (serialization) between load
-///    instructions preceding this instruction and load instructions following
-///    this instruction, ensuring the system completes all previous loads before
-///    executing subsequent loads.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> LFENCE </c> instruction.
-///
-void _mm_lfence(void);
-
-/// Forces strong memory ordering (serialization) between load and store
-///    instructions preceding this instruction and load and store instructions
-///    following this instruction, ensuring that the system completes all
-///    previous memory accesses before executing subsequent memory accesses.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MFENCE </c> instruction.
-///
-void _mm_mfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
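-/* Editor's note: an illustrative sketch (not part of the original header)
-   combining the declarations above: flush a byte range from the cache
-   hierarchy, then fence so the flushes complete before later accesses.
-   The 64-byte line size is an assumption (typical, not architectural). */
-static __inline__ void __example_flush_range(const void *__p, unsigned long __len)
-{
-  const char *__c = (const char *)__p;
-  unsigned long __i;
-  for (__i = 0; __i < __len; __i += 64)
-    _mm_clflush(__c + __i); /* flush each line overlapping the range */
-  _mm_mfence();             /* order the flushes against later accesses */
-}
-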
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit signed integers, and packs the results into the
-///    destination. Values greater than 0x7F are saturated to 0x7F. Values
-///    less than -0x80 are saturated to -0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
-///
-/// \param __a
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than -0x80 are saturated to -0x80. The converted [8 x i8] values are
-///   written to the lower 64 bits of the result.
-/// \param __b
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than -0x80 are saturated to -0x80. The converted [8 x i8] values are
-///   written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Converts 32-bit signed integers from both 128-bit integer vector
-///    operands into 16-bit signed integers, and packs the results into the
-///    destination. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than -0x8000 are saturated to -0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than -0x8000 are saturated to -0x8000. The converted [4 x i16]
-///    values are written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than -0x8000 are saturated to -0x8000. The converted [4 x i16]
-///    values are written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
-}
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit unsigned integers, and packs the results into the
-///    destination. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
-}
-
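-/* Editor's note: a minimal sketch (not part of the original header) of the
-   usual narrowing idiom: two vectors of [8 x i16] become one vector of
-   [16 x i8], with out-of-range lanes clamped rather than wrapped. */
-static __inline__ __m128i __example_narrow_to_u8(__m128i __lo, __m128i __hi)
-{
-  return _mm_packus_epi16(__lo, __hi); /* lanes clamped to [0, 255] */
-}
-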
-/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
-///    the immediate-value parameter as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] select one of the eight 16-bit elements
-///    of \a __a to be assigned to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
-/// \returns An integer, whose lower 16 bits are selected from the 128-bit
-///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
-                                                    (int)(imm)))
-
-/// Constructs a 128-bit integer vector by first making a copy of the
-///    128-bit integer vector parameter, and then inserting the lower 16 bits
-///    of an integer parameter into an offset specified by the immediate-value
-///    parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
-///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
-///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
-///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
-                                        (int)(imm)))
-
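-/* Editor's note: a user-side sketch (not part of the original header).
-   Because the element selector must be a compile-time constant, extract and
-   insert are macros; this writes lane 3 of a vector and reads it back. */
-static __inline__ int __example_insert_extract(__m128i __v)
-{
-  __m128i __w = _mm_insert_epi16(__v, 0x1234, 3); /* replace lane 3 */
-  return _mm_extract_epi16(__w, 3);               /* yields 0x1234 */
-}
-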
-/// Copies the values of the most significant bits from each 8-bit
-///    element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
-///    value, zero-extends the value, and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVMSKB / PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bits from each 8-bit element in \a __a,
-///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
-  return __builtin_ia32_pmovmskb128((__v16qi)__a);
-}
-
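-/* Editor's note: an illustrative sketch (not part of the original header):
-   the classic memchr-style probe, pairing _mm_cmpeq_epi8 with
-   _mm_movemask_epi8 to test 16 bytes at once. */
-static __inline__ int __example_contains_byte(__m128i __block, char __c)
-{
-  __m128i __eq = _mm_cmpeq_epi8(__block, _mm_set1_epi8(__c));
-  return _mm_movemask_epi8(__eq) != 0; /* nonzero if any byte matched */
-}
-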
-/// Constructs a 128-bit integer vector by shuffling four 32-bit
-///    elements of a 128-bit integer vector parameter, using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the values to be copied.
-/// \param imm
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from a. The destinations within the 128-bit destination are assigned
-///    values as follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] of the result. \n
-///    Bits [3:2] are used to assign values to bits [63:32] of the result. \n
-///    Bits [5:4] are used to assign values to bits [95:64] of the result. \n
-///    Bits [7:6] are used to assign values to bits [127:96] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [31:0] of \a a. \n
-///    01: assign values from bits [63:32] of \a a. \n
-///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
-  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
-
-/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits
-///    [127:64] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [15:0] of the result. \n
-///    Bits[3:2] are used to assign values to bits [31:16] of the result. \n
-///    Bits[5:4] are used to assign values to bits [47:32] of the result. \n
-///    Bits[7:6] are used to assign values to bits [63:48] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [15:0] of \a a. \n
-///    01: assign values from bits [31:16] of \a a. \n
-///    10: assign values from bits [47:32] of \a a. \n
-///    11: assign values from bits [63:48] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
-  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
-
-/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFHW / PSHUFHW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits
-///    [63:0] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [79:64] of the result. \n
-///    Bits[3:2] are used to assign values to bits [95:80] of the result. \n
-///    Bits[5:4] are used to assign values to bits [111:96] of the result. \n
-///    Bits[7:6] are used to assign values to bits [127:112] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [79:64] of \a a. \n
-///    01: assign values from bits [95:80] of \a a. \n
-///    10: assign values from bits [111:96] of \a a. \n
-///    11: assign values from bits [127:112] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
-  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
-
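-/* Editor's note: a user-side sketch (not part of the original header). The
-   8-bit immediate packs four 2-bit source indices; the _MM_SHUFFLE macro
-   from <xmmintrin.h> composes it in high-to-low element order. */
-static __inline__ void __example_shuffles(__m128i __v)
-{
-  __m128i __splat = _mm_shuffle_epi32(__v, _MM_SHUFFLE(0, 0, 0, 0)); /* broadcast lane 0 */
-  __m128i __rev   = _mm_shuffle_epi32(__v, _MM_SHUFFLE(0, 1, 2, 3)); /* reverse lanes */
-  (void)__splat; (void)__rev;
-}
-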
-/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
-///    of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHBW / PUNPCKHBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-///    Bits [71:64] are written to bits [7:0] of the result. \n
-///    Bits [79:72] are written to bits [23:16] of the result. \n
-///    Bits [87:80] are written to bits [39:32] of the result. \n
-///    Bits [95:88] are written to bits [55:48] of the result. \n
-///    Bits [103:96] are written to bits [71:64] of the result. \n
-///    Bits [111:104] are written to bits [87:80] of the result. \n
-///    Bits [119:112] are written to bits [103:96] of the result. \n
-///    Bits [127:120] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [71:64] are written to bits [15:8] of the result. \n
-///    Bits [79:72] are written to bits [31:24] of the result. \n
-///    Bits [87:80] are written to bits [47:40] of the result. \n
-///    Bits [95:88] are written to bits [63:56] of the result. \n
-///    Bits [103:96] are written to bits [79:72] of the result. \n
-///    Bits [111:104] are written to bits [95:88] of the result. \n
-///    Bits [119:112] are written to bits [111:104] of the result. \n
-///    Bits [127:120] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
-///    [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHWD / PUNPCKHWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [15:0] of the result. \n
-///    Bits [95:80] are written to bits [47:32] of the result. \n
-///    Bits [111:96] are written to bits [79:64] of the result. \n
-///    Bits [127:112] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [31:16] of the result. \n
-///    Bits [95:80] are written to bits [63:48] of the result. \n
-///    Bits [111:96] are written to bits [95:80] of the result. \n
-///    Bits [127:112] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
-}
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHDQ / PUNPCKHDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHQDQ / PUNPCKHQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
-///    [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLBW / PUNPCKLBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [7:0] of the result. \n
-///    Bits [15:8] are written to bits [23:16] of the result. \n
-///    Bits [23:16] are written to bits [39:32] of the result. \n
-///    Bits [31:24] are written to bits [55:48] of the result. \n
-///    Bits [39:32] are written to bits [71:64] of the result. \n
-///    Bits [47:40] are written to bits [87:80] of the result. \n
-///    Bits [55:48] are written to bits [103:96] of the result. \n
-///    Bits [63:56] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [15:8] of the result. \n
-///    Bits [15:8] are written to bits [31:24] of the result. \n
-///    Bits [23:16] are written to bits [47:40] of the result. \n
-///    Bits [31:24] are written to bits [63:56] of the result. \n
-///    Bits [39:32] are written to bits [79:72] of the result. \n
-///    Bits [47:40] are written to bits [95:88] of the result. \n
-///    Bits [55:48] are written to bits [111:104] of the result. \n
-///    Bits [63:56] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
-}
-
-/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
-///    vectors of [8 x i16] and interleaves them into a 128-bit vector of
-///    [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLWD / PUNPCKLWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [15:0] are written to bits [15:0] of the result. \n
-///    Bits [31:16] are written to bits [47:32] of the result. \n
-///    Bits [47:32] are written to bits [79:64] of the result. \n
-///    Bits [63:48] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [15:0] are written to bits [31:16] of the result. \n
-///    Bits [31:16] are written to bits [63:48] of the result. \n
-///    Bits [47:32] are written to bits [95:80] of the result. \n
-///    Bits [63:48] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLDQ / PUNPCKLDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination. \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
-}
-
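Taken together, the eight unpack intrinsics above form the SSE2 interleave family: each picks the low or high half of two source vectors and alternates their elements. A minimal usage sketch, assuming any SSE2-capable C compiler (e.g. cc -msse2 demo.c):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128i a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                8, 9, 10, 11, 12, 13, 14, 15);
      __m128i b = _mm_setr_epi8(16, 17, 18, 19, 20, 21, 22, 23,
                                24, 25, 26, 27, 28, 29, 30, 31);
      unsigned char lo[16], hi[16];
      _mm_storeu_si128((__m128i *)lo, _mm_unpacklo_epi8(a, b)); /* 0,16,1,17,... */
      _mm_storeu_si128((__m128i *)hi, _mm_unpackhi_epi8(a, b)); /* 8,24,9,25,... */
      for (int i = 0; i < 16; ++i)
        printf("%d,%d ", lo[i], hi[i]);
      printf("\n");
      return 0;
    }
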
-/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
-///    integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVDQ2Q </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
-  return (__m64)__a[0];
-}
-
-/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the
-///    upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVD+VMOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit value.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
-}
-
-/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
-///    integer vector, zeroing the upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
-}
-
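Of the three moves above, only _mm_move_epi64 stays entirely in the XMM domain; the other two cross between MMX (__m64) and SSE registers. A sketch of the XMM-only form, assuming SSE2:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128i v  = _mm_set_epi64x(0x1111111111111111LL, 0x2222222222222222LL);
      __m128i lo = _mm_move_epi64(v);  /* keep bits [63:0], zero bits [127:64] */
      unsigned long long out[2];
      _mm_storeu_si128((__m128i *)out, lo);
      printf("%llx %llx\n", out[0], out[1]);  /* 2222222222222222 0 */
      return 0;
    }
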
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors
-///    of [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
-}
-
-/// Extracts the sign bits of the two double-precision values in the 128-bit
-///    vector of [2 x double], zero-extends them, and writes them to the
-///    low-order bits of the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPD / MOVMSKPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values with sign bits to
-///    be extracted.
-/// \returns The sign bits from each of the double-precision elements in \a __a,
-///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
-  return __builtin_ia32_movmskpd((__v2df)__a);
-}
-
-
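A small usage sketch for _mm_movemask_pd: the two sign bits land in bits [1:0] of the returned int, so the mask can drive a branch or a table lookup (assumes SSE2):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d v = _mm_set_pd(-2.0, 3.0);  /* element 1 = -2.0, element 0 = 3.0 */
      int mask = _mm_movemask_pd(v);      /* bit 0 = sign(elem 0), bit 1 = sign(elem 1) */
      printf("mask = %d\n", mask);        /* prints 2: only the upper element is negative */
      return 0;
    }
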
-/// Constructs a 128-bit floating-point vector of [2 x double] from two
-///    128-bit vector parameters of [2 x double], using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPD / SHUFPD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param i
-///    An 8-bit immediate value. The least significant two bits specify which
-///    elements to copy from \a a and \a b: \n
-///    Bit[0] = 0: lower element of \a a copied to lower element of result. \n
-///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
-///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
-///    Bit[1] = 1: upper element of \a b copied to upper element of result.
-/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
-                                  (int)(i)))
-
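In practice the immediate for _mm_shuffle_pd is built with the _MM_SHUFFLE2 macro defined near the end of this header. A sketch, assuming SSE2:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d a = _mm_set_pd(11.0, 10.0);  /* a = {10.0, 11.0} */
      __m128d b = _mm_set_pd(21.0, 20.0);  /* b = {20.0, 21.0} */
      /* Bit 0 picks the element of a for the low result slot; bit 1 picks
         the element of b for the high slot: here {a[1], b[0]}. */
      __m128d r = _mm_shuffle_pd(a, b, _MM_SHUFFLE2(0, 1));
      double out[2];
      _mm_storeu_pd(out, r);
      printf("%g %g\n", out[0], out[1]);   /* 11 20 */
      return 0;
    }
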
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    floating-point vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    floating-point vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
-  return (__m128d)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
-  return (__m128d)__a;
-}
-
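All six cast intrinsics above generate no instructions; they only reinterpret the 128 bits under a new static type. One illustrative use, computing the absolute value of both lanes by masking the sign bits through the integer domain (assumes SSE2):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d v = _mm_set_pd(-1.5, -2.25);
      __m128i bits = _mm_castpd_si128(v);   /* reinterpret, no instruction */
      __m128i sign = _mm_set1_epi64x((long long)0x8000000000000000ULL);
      /* andnot computes (~sign) & bits, clearing both sign bits. */
      __m128d abs = _mm_castsi128_pd(_mm_andnot_si128(sign, bits));
      double out[2];
      _mm_storeu_pd(out, abs);
      printf("%g %g\n", out[0], out[1]);    /* 2.25 1.5 */
      return 0;
    }
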
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Indicates that a spin loop is being executed for the purposes of
-///    optimizing power consumption during the loop.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAUSE </c> instruction.
-///
-void _mm_pause(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
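_mm_pause is the standard hint inside spin-wait loops; a minimal sketch using a C11 atomic flag (the spin_wait function is illustrative, not part of this header):

    #include <emmintrin.h>
    #include <stdatomic.h>

    /* Busy-wait until *flag becomes nonzero, hinting the CPU on each spin. */
    void spin_wait(atomic_int *flag) {
      while (atomic_load_explicit(flag, memory_order_acquire) == 0)
        _mm_pause();  /* PAUSE: cuts power and frees pipeline resources */
    }
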
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-
-#define _MM_DENORMALS_ZERO_ON   (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
-
-#define _MM_DENORMALS_ZERO_MASK (0x0040U)
-
-#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
-
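The denormals-are-zero helpers above read-modify-write the MXCSR register through _mm_getcsr/_mm_setcsr. A usage sketch that enables the mode around a hypothetical kernel and then restores the caller's setting (assumes SSE2):

    #include <emmintrin.h>

    void with_daz(void (*kernel)(void)) {
      unsigned int old = _MM_GET_DENORMALS_ZERO_MODE();
      _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
      kernel();                           /* subnormal inputs read as zero here */
      _MM_SET_DENORMALS_ZERO_MODE(old);   /* restore the previous mode */
    }
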
-#endif /* __EMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/float.h b/linux-x86/lib64/clang/14.0.2/include/float.h
deleted file mode 100644
index ed610b2..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/float.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*===---- float.h - Characteristics of floating point types ----------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_FLOAT_H
-#define __CLANG_FLOAT_H
-
-/* If we're on MinGW, fall back to the system's float.h, which might have
- * additional definitions provided for Windows.
- * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
- *
- * Also fall back on Darwin to allow additional definitions and
- * implementation-defined values.
- */
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
-    __STDC_HOSTED__ && __has_include_next(<float.h>)
-
-/* Prior to Apple's 10.7 SDK, the float.h SDK header used to apply an extra
- * level of #include_next<float.h> to keep Metrowerks compilers happy. Avoid
- * this extra indirection.
- */
-#ifdef __APPLE__
-#define _FLOAT_H_
-#endif
-
-#  include_next <float.h>
-
-/* Undefine anything that we'll be redefining below. */
-#  undef FLT_EVAL_METHOD
-#  undef FLT_ROUNDS
-#  undef FLT_RADIX
-#  undef FLT_MANT_DIG
-#  undef DBL_MANT_DIG
-#  undef LDBL_MANT_DIG
-#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
-#    undef DECIMAL_DIG
-#  endif
-#  undef FLT_DIG
-#  undef DBL_DIG
-#  undef LDBL_DIG
-#  undef FLT_MIN_EXP
-#  undef DBL_MIN_EXP
-#  undef LDBL_MIN_EXP
-#  undef FLT_MIN_10_EXP
-#  undef DBL_MIN_10_EXP
-#  undef LDBL_MIN_10_EXP
-#  undef FLT_MAX_EXP
-#  undef DBL_MAX_EXP
-#  undef LDBL_MAX_EXP
-#  undef FLT_MAX_10_EXP
-#  undef DBL_MAX_10_EXP
-#  undef LDBL_MAX_10_EXP
-#  undef FLT_MAX
-#  undef DBL_MAX
-#  undef LDBL_MAX
-#  undef FLT_EPSILON
-#  undef DBL_EPSILON
-#  undef LDBL_EPSILON
-#  undef FLT_MIN
-#  undef DBL_MIN
-#  undef LDBL_MIN
-#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
-#    undef FLT_TRUE_MIN
-#    undef DBL_TRUE_MIN
-#    undef LDBL_TRUE_MIN
-#    undef FLT_DECIMAL_DIG
-#    undef DBL_DECIMAL_DIG
-#    undef LDBL_DECIMAL_DIG
-#    undef FLT_HAS_SUBNORM
-#    undef DBL_HAS_SUBNORM
-#    undef LDBL_HAS_SUBNORM
-#  endif
-#endif
-
-/* Characteristics of floating point types, C99 5.2.4.2.2 */
-
-#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
-#define FLT_ROUNDS (__builtin_flt_rounds())
-#define FLT_RADIX __FLT_RADIX__
-
-#define FLT_MANT_DIG __FLT_MANT_DIG__
-#define DBL_MANT_DIG __DBL_MANT_DIG__
-#define LDBL_MANT_DIG __LDBL_MANT_DIG__
-
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
-#  define DECIMAL_DIG __DECIMAL_DIG__
-#endif
-
-#define FLT_DIG __FLT_DIG__
-#define DBL_DIG __DBL_DIG__
-#define LDBL_DIG __LDBL_DIG__
-
-#define FLT_MIN_EXP __FLT_MIN_EXP__
-#define DBL_MIN_EXP __DBL_MIN_EXP__
-#define LDBL_MIN_EXP __LDBL_MIN_EXP__
-
-#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
-#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
-#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
-
-#define FLT_MAX_EXP __FLT_MAX_EXP__
-#define DBL_MAX_EXP __DBL_MAX_EXP__
-#define LDBL_MAX_EXP __LDBL_MAX_EXP__
-
-#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
-#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
-#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
-
-#define FLT_MAX __FLT_MAX__
-#define DBL_MAX __DBL_MAX__
-#define LDBL_MAX __LDBL_MAX__
-
-#define FLT_EPSILON __FLT_EPSILON__
-#define DBL_EPSILON __DBL_EPSILON__
-#define LDBL_EPSILON __LDBL_EPSILON__
-
-#define FLT_MIN __FLT_MIN__
-#define DBL_MIN __DBL_MIN__
-#define LDBL_MIN __LDBL_MIN__
-
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
-#  define FLT_TRUE_MIN __FLT_DENORM_MIN__
-#  define DBL_TRUE_MIN __DBL_DENORM_MIN__
-#  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
-#  define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
-#  define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
-#  define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
-#  define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
-#  define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
-#  define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
-#endif
-
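A quick sketch of these macros in use; plain hosted C, nothing beyond <float.h> and <stdio.h> assumed:

    #include <float.h>
    #include <stdio.h>

    int main(void) {
      /* FLT_EPSILON is the gap between 1.0f and the next representable float. */
      printf("float:  %d mantissa bits, epsilon %g\n", FLT_MANT_DIG, FLT_EPSILON);
      printf("double: %d mantissa bits, epsilon %g\n", DBL_MANT_DIG, DBL_EPSILON);
      printf("smallest normal double: %g\n", DBL_MIN);
      return 0;
    }
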
-#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
-#  define FLT16_MANT_DIG    __FLT16_MANT_DIG__
-#  define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
-#  define FLT16_DIG         __FLT16_DIG__
-#  define FLT16_MIN_EXP     __FLT16_MIN_EXP__
-#  define FLT16_MIN_10_EXP  __FLT16_MIN_10_EXP__
-#  define FLT16_MAX_EXP     __FLT16_MAX_EXP__
-#  define FLT16_MAX_10_EXP  __FLT16_MAX_10_EXP__
-#  define FLT16_MAX         __FLT16_MAX__
-#  define FLT16_EPSILON     __FLT16_EPSILON__
-#  define FLT16_MIN         __FLT16_MIN__
-#  define FLT16_TRUE_MIN    __FLT16_TRUE_MIN__
-#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
-
-#endif /* __CLANG_FLOAT_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h
deleted file mode 100644
index cdffd93..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ /dev/null
@@ -1,8450 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Automatically generated file, do not edit!
-//===----------------------------------------------------------------------===//
-
-
-
-#ifndef __HEXAGON_PROTOS_H_
-#define __HEXAGON_PROTOS_H_ 1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=abs(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_abs_R __builtin_HEXAGON_A2_abs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=abs(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_abs_P __builtin_HEXAGON_A2_absp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=abs(Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RR __builtin_HEXAGON_A2_add
-
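Each entry in this generated file is a macro alias for a compiler builtin, so the intrinsics are called like ordinary functions. A hedged sketch (only compiles with a Hexagon toolchain, e.g. clang --target=hexagon; the Word32 typedef here just mirrors the prototype lines above):

    #include <hexagon_protos.h>

    typedef int Word32;  /* as used in the "C Intrinsic Prototype" lines */

    Word32 add_then_abs(Word32 x, Word32 y) {
      Word32 sum = Q6_R_add_RR(x, y);  /* Rd32 = add(Rs32,Rt32) */
      return Q6_R_abs_R(sum);          /* Rd32 = abs(Rs32) */
    }
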
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.h,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rt32.l,Rs32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,#s16)
-   C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16)
-   Instruction Type:      ALU32_ADDI
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RI __builtin_HEXAGON_A2_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_add_PP __builtin_HEXAGON_A2_addp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_add_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=add(Rs32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RR __builtin_HEXAGON_A2_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RI __builtin_HEXAGON_A2_andir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=and(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_and_PP __builtin_HEXAGON_A2_andp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=aslh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asrh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.h,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.h,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=combine(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(#s8,#S8)
-   C Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=max(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_max_RR __builtin_HEXAGON_A2_max
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=max(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=maxu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=maxu(Rss32,Rtt32)
-   C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=min(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_min_RR __builtin_HEXAGON_A2_min
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=min(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_min_PP __builtin_HEXAGON_A2_minp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=minu(Rt32,Rs32)
-   C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=minu(Rtt32,Rss32)
-   C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=neg(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_neg_R __builtin_HEXAGON_A2_neg
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=neg(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_neg_P __builtin_HEXAGON_A2_negp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=neg(Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=not(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_R __builtin_HEXAGON_A2_not
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=not(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_not_P __builtin_HEXAGON_A2_notp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RR __builtin_HEXAGON_A2_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_or_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RI __builtin_HEXAGON_A2_orir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=or(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_or_PP __builtin_HEXAGON_A2_orp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rss32):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sat(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sat_P __builtin_HEXAGON_A2_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satb_R __builtin_HEXAGON_A2_satb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sath(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sath_R __builtin_HEXAGON_A2_sath
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satub(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satub_R __builtin_HEXAGON_A2_satub
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=satuh(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.h,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):sat:<<16
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l)
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl __builtin_HEXAGON_A2_subh_l16_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32.l,Rs32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=sub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(#s10,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sub(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddh(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vadduh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vavgh(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vavgh(Rs32,Rt32):rnd
-   C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vnavgh(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubh(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubh(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsubuh(Rt32,Rs32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=swiz(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sxtb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sxth(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=sxtw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=Rs32
-   C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32.h=#u16
-   C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32.l=#u16
-   C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=Rss32
-   C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=#s8
-   C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=#s16
-   C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsh(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsw(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsw(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddub(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vadduh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaddw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws
-
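-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): the
-   vector adds operate per lane; the :sat forms clamp instead of wrapping.
-   Shown on the low unsigned-byte lane with illustrative values.
-
-     Word64 a = 0x00000000000000F0LL;
-     Word64 b = 0x0000000000000020LL;
-     Q6_P_vaddub_PP(a, b);               // low lane 0xF0+0x20 wraps to 0x10
-     Q6_P_vaddub_PP_sat(a, b);           // low lane clamps to 0xFF
-   ========================================================================== */
-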
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32):crnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgh(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgub(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguh(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavguw(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32):crnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vavgw(Rss32,Rtt32):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr
-
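-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): vavg
-   halves the per-lane sum; :rnd adds one before the shift, and :crnd is
-   understood here as convergent (round-to-even) rounding. Low lane shown.
-
-     Word64 a = 0x0000000000000003LL;    // low halfword lane: 3
-     Word64 b = 0x0000000000000004LL;    // low halfword lane: 4
-     Q6_P_vavgh_PP(a, b);                // (3+4)>>1   -> 3
-     Q6_P_vavgh_PP_rnd(a, b);            // (3+4+1)>>1 -> 4
-   ========================================================================== */
-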
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu
-
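-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): the
-   vector compares pack their lane results into the 8-bit predicate, read
-   here as a Byte; the assumption is one bit per byte lane for vcmpb (two
-   per halfword for vcmph, four per word for vcmpw).
-
-     Word64 x = 0x0102030405060708LL;
-     Q6_p_vcmpb_eq_PP(x, x);             // all byte lanes equal -> 0xFF
-   ========================================================================== */
-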
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vconj(Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxuh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxuw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmaxw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminuh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminuw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vminw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw
-
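-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): vmax
-   and vmin select per lane, signed for the h/w forms and unsigned for the
-   u variants; the halfword lane values below are illustrative.
-
-     Word64 a = 0xFFFF000200000000LL;    // halfword lanes: -1, 2, 0, 0
-     Word64 b = 0x0001000100000000LL;    // halfword lanes:  1, 1, 0, 0
-     Q6_P_vmaxh_PP(a, b);                // 0x0001000200000000
-     Q6_P_vminh_PP(a, b);                // 0xFFFF000100000000
-   ========================================================================== */
-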
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32):crnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgh(Rtt32,Rss32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32):crnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vnavgw(Rtt32,Rss32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr
-
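-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): vnavg
-   is read here as the halved per-lane difference, first operand minus
-   second as in the assembly order (an assumption worth checking against
-   the PRM); the low halfword lane is shown.
-
-     Word64 a = 0x000000000000000ALL;    // low lane: 10
-     Word64 b = 0x0000000000000004LL;    // low lane:  4
-     Q6_P_vnavgh_PP(a, b);               // (10-4)>>1 -> 3 in the low lane
-   ========================================================================== */
-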
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vraddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vraddub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrsadub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrsadub(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc
-
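-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): the
-   accumulating vrsadub form is the core of SAD-based motion estimation.
-   sad8x8 is a hypothetical helper; it assumes the result keeps two 32-bit
-   partial sums, one per 4-byte half, which the caller combines.
-
-     Word32 sad8x8(const Word64 *cur, const Word64 *ref)
-     {
-         Word64 acc = 0;
-         for (int row = 0; row < 8; ++row)
-             acc = Q6_P_vrsadubacc_PP(acc, cur[row], ref[row]);
-         return (Word32)acc + (Word32)(acc >> 32);
-     }
-   ========================================================================== */
-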
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubh(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubub(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubuh(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsubw(Rtt32,Rss32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=xor(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=zxtb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=zxth(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth
-
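-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): the
-   zero-extend intrinsics keep only the low byte or halfword.
-
-     Q6_R_zxtb_R(0x12345678);            // -> 0x00000078
-     Q6_R_zxth_R(0x12345678);            // -> 0x00005678
-   ========================================================================== */
-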
-/* ==========================================================================
-   Assembly Syntax:       Rd32=and(Rt32,~Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=and(Rtt32,~Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_and_PnP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=bitsplit(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=bitsplit(Rs32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti
-
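-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header):
-   bitsplit is read here as placing Rs>>#u5 in the high word and the low
-   #u5 bits in the low word (an assumption worth checking against the PRM).
-
-     Q6_P_bitsplit_RI(0xABCD, 8);        // -> 0x000000AB000000CD
-   ========================================================================== */
-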
-/* ==========================================================================
-   Assembly Syntax:       Pd4=boundscheck(Rs32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.eq(Rs32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gt(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmpb.gtu(Rs32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.eq(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gt(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmph.gtu(Rs32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(#s8,Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=combine(Rs32,#s8)
-   C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri
-
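-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): combine
-   packs its first operand into the high word and its second into the low
-   word, mirroring the assembly operand order.
-
-     Q6_P_combine_IR(1, 2);              // -> 0x0000000100000002
-     Q6_P_combine_RI(1, 2);              // -> 0x0000000100000002
-   ========================================================================== */
-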
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cround(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cround_RI __builtin_HEXAGON_A4_cround_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cround(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=modwrap(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu
-
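-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): modwrap
-   is read here as a single-step wrap into [0, Rt), suited to circular-
-   buffer indices that move by less than one period per update.
-
-     Q6_R_modwrap_RR(10, 8);             // 10 >= 8 -> 10 - 8 = 2
-     Q6_R_modwrap_RR(-1, 8);             // -1 <  0 -> -1 + 8 = 7
-   ========================================================================== */
-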
-/* ==========================================================================
-   Assembly Syntax:       Rd32=or(Rt32,~Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=or(Rtt32,~Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_cmp_eq_RR __builtin_HEXAGON_A4_rcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmp.eq(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=!cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=!cmp.eq(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,#u5):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=round(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat
-
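-/* ==========================================================================
-   Usage sketch (editorial note, not part of the generated header): round
-   is read here as adding half of 2^#u5 before an arithmetic shift right
-   by #u5; the :sat forms clamp the intermediate sum.
-
-     Q6_R_round_RI(0x00000180, 8);       // (0x180+0x80)>>8 -> 2 (384/256)
-   ========================================================================== */
-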
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tlbmatch(Rss32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=any8(vcmpb.eq(Rss32,Rtt32))
-   C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.eq(Rss32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpb.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.eq(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmph.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.eq(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gt(Rss32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=vcmpw.gtu(Rss32,#u7)
-   C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxuh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxuw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrmaxw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminuh(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminuw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=vrminw(Rss32,Ru32)
-   C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vaddhub(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=all8(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_all8_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_all8_p __builtin_HEXAGON_C2_all8
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Pt4,Ps4)
-   C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_pp __builtin_HEXAGON_C2_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Pt4,!Ps4)
-   C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=any8(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_any8_p __builtin_HEXAGON_C2_any8
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsclr(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsclr(Rs32,#u6)
-   C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=bitsset(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.ge(Rs32,#s8)
-   C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.geu(Rs32,#u8)
-   C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rs32,#u9)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.gtu(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.lt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=cmp.ltu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mask(Pt4)
-   C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mask_p __builtin_HEXAGON_C2_mask
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,#s8,#S8)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mux(Pu4,#s8,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=not(Ps4)
-   C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_p __builtin_HEXAGON_C2_not
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Pt4,Ps4)
-   C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_pp __builtin_HEXAGON_C2_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Pt4,!Ps4)
-   C Intrinsic Prototype: Byte Q6_p_or_pnp(Byte Pt, Byte Ps)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=Ps4
-   C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=Ps4
-   C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=Rs32
-   C Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vitpack(Ps4,Pt4)
-   C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmux(Pu4,Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmux_pPP __builtin_HEXAGON_C2_vmux
-
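-/* Illustrative usage sketch (editorial addition): a full 64-bit select via
-   vmux. A scalar compare writes the same value to every predicate bit, so
-   the bytewise vmux degenerates to a whole-register select. Helper name is
-   hypothetical. */
-
-static inline Word64 q6_select_max64(Word64 a, Word64 b)
-{
-  Byte p = Q6_p_cmp_gt_PP(a, b);   /* Pd4=cmp.gt(Rss32,Rtt32)    */
-  return Q6_P_vmux_pPP(p, a, b);   /* Rdd32=vmux(Pu4,Rss32,Rtt32) */
-}
-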
-/* ==========================================================================
-   Assembly Syntax:       Pd4=xor(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,and(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,and(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,or(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=and(Ps4,or(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gt(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gtu(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.gtu(Rs32,#u9)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!cmp.eq(Rs32,#s10)
-   C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU32_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=fastcorner9(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!fastcorner9(Ps4,Pt4)
-   C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsclr(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsclr(Rs32,#u6)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!bitsset(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,and(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,and(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,or(Pt4,Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=or(Ps4,or(Pt4,!Pu4))
-   C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
-   Instruction Type:      CR
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_d2df(Rss32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_d2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2d(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2d(Rss32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2ud(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_df2ud(Rss32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2uw(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2uw(Rss32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2w(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_df2w(Rss32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2d(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2d(Rs32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2ud(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_sf2ud(Rs32):chop
-   C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2uw(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2uw(Rs32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2w(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_sf2w(Rs32):chop
-   C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop
-
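-/* Illustrative usage sketch (editorial addition): the :chop conversions
-   round toward zero, the rounding a C cast from float to int performs,
-   while the plain forms round to nearest. Helper name is hypothetical. */
-
-static inline Word32 q6_ftoi_trunc(Float32 x)
-{
-  return Q6_R_convert_sf2w_R_chop(x);  /* Rd32=convert_sf2w(Rs32):chop */
-}
-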
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_ud2df(Rss32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_ud2df_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_ud2sf(Rss32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_uw2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_uw2sf(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=convert_w2df(Rs32)
-   C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=convert_w2sf(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfclass(Rss32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.eq(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.ge(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.gt(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=dfcmp.uo(Rss32,Rtt32)
-   C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmake(#u10):neg
-   C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmake(#u10):pos
-   C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfadd(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfclass(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.eq(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_eq_RR __builtin_HEXAGON_F2_sfcmpeq
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.ge(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.gt(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=sfcmp.uo(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo
-
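-/* Illustrative usage sketch (editorial addition): sfcmp.uo is true exactly
-   when the comparison is unordered, i.e. at least one operand is NaN, so
-   comparing a value with itself tests for NaN. Helper name is hypothetical. */
-
-static inline int q6_is_nan(Float32 x)
-{
-  return Q6_p_sfcmp_uo_RR(x, x) != 0;  /* Pd4=sfcmp.uo(Rs32,Rt32) */
-}
-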
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupd(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupn(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sffixupr(Rs32)
-   C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32):lib
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sfmpy(Rs32,Rt32,Pu4):scale
-   C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=sfmpy(Rs32,Rt32):lib
-   C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmake(#u10):neg
-   C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmake(#u10):pos
-   C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmake_I_pos __builtin_HEXAGON_F2_sfimm_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmax(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmin(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin
-
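-/* Illustrative usage sketch (editorial addition): clamping a value to a
-   range by composing sfmax and sfmin. Helper name is hypothetical. */
-
-static inline Float32 q6_clampf(Float32 x, Float32 lo, Float32 hi)
-{
-  return Q6_R_sfmin_RR(Q6_R_sfmax_RR(x, lo), hi);
-}
-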
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfmpy(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=sfsub(Rs32,Rt32)
-   C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub
-
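-/* Illustrative usage sketch (editorial addition): a small dot product using
-   the single-precision multiply and multiply-accumulate intrinsics above.
-   Helper name is hypothetical. */
-
-static inline Float32 q6_dot3(const Float32 *a, const Float32 *b)
-{
-  Float32 acc = Q6_R_sfmpy_RR(a[0], b[0]);   /* Rd32=sfmpy(Rs32,Rt32)  */
-  acc = Q6_R_sfmpyacc_RR(acc, a[1], b[1]);   /* Rx32+=sfmpy(Rs32,Rt32) */
-  acc = Q6_R_sfmpyacc_RR(acc, a[2], b[2]);
-  return acc;
-}
-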
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memb(Rx32++#s4:0:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memb(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=memd(Rx32++#s4:3:circ(Mu2))
-   C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=memd(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memh(Rx32++#s4:1:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memh(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memw(Rx32++#s4:2:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memw(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memub(Rx32++#s4:0:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memub(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memuh(Rx32++#s4:1:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=memuh(Rx32++I:circ(Mu2))
-   C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
-   Instruction Type:      LD
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=add(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyr(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyr(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32*):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32*):sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=cmpy(Rs32,Rt32*):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0
-
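-/* Illustrative usage sketch (editorial addition): a widening 32x32->64
-   multiply-accumulate in a single M-unit operation. Helper name is
-   hypothetical. */
-
-static inline Word64 q6_mac64(Word64 acc, Word32 x, Word32 y)
-{
-  return Q6_P_mpyacc_RR(acc, x, y);  /* Rxx32+=mpy(Rs32,Rt32) */
-}
-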
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyi(Rs32,#u8)
-   C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyi(Rs32,#u8)
-   C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouhacc_PP_s1_sat __builtin_HEXAGON_M2_mmacuhs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1
-
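/* Sketch of the accumulating vector forms, assuming Word64 == long long.
   Each 64-bit operand packs two 32-bit words; "weh" multiplies the even
   (low) halfword of each word and "woh" the odd (high) one, applying the
   <<1, :rnd and :sat modifiers shown above before accumulating. */
#include <hexagon_protos.h>

static long long mac_even_lanes(long long acc, long long x, long long y)
{
    /* For each word i: acc.w[i] += sat(rnd((x.w[i].l * y.w[i].l) << 1)) */
    return Q6_P_vmpywehacc_PP_s1_rnd_sat(acc, x, y);
}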
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1
-
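/* Sketch: elementwise products of four packed 16-bit lanes using the
   non-accumulating vector forms above.  "weh" returns the two even-lane
   (low-halfword) products and "woh" the two odd-lane ones, each as a
   rounded, saturated 32-bit word.  Assumes Word64 == long long. */
#include <hexagon_protos.h>

static void lane_products(long long x, long long y,
                          long long *even, long long *odd)
{
    *even = Q6_P_vmpyweh_PP_s1_rnd_sat(x, y);   /* lanes 0 and 2 */
    *odd  = Q6_P_vmpywoh_PP_s1_rnd_sat(x, y);   /* lanes 1 and 3 */
}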
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1
-
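/* Sketch: the 16x16+32 halfword MAC above in a classic FIR-style inner
   loop.  The 16-bit inputs are promoted to Word32 and only their low
   halfwords are multiplied.  Illustration only. */
#include <hexagon_protos.h>

static int fir_acc(const short *x, const short *h, int taps)
{
    int acc = 0;
    for (int i = 0; i < taps; ++i)
        acc = Q6_R_mpyacc_RlRl(acc, x[i], h[i]);   /* acc += x[i] * h[i] */
    return acc;
}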
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1
-
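/* Sketch: squared magnitude of a complex sample packed as {imag:real} in
   one register, built from the plain and accumulating halfword multiplies
   above.  The packing convention (real in .l, imag in .h) is an assumption
   of this example. */
#include <hexagon_protos.h>

static int cmplx_energy(int z)
{
    int e = Q6_R_mpy_RlRl(z, z);          /* real * real */
    return Q6_R_mpyacc_RhRh(e, z, z);     /* + imag * imag */
}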
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1
-
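/* Sketch: a 16-bit complex multiply from the halfword multiply, MAC and
   multiply-subtract forms above, under the same {imag:real} packing
   assumption: re(a*b) = a.re*b.re - a.im*b.im and
   im(a*b) = a.re*b.im + a.im*b.re. */
#include <hexagon_protos.h>

static void cmplx_mul(int a, int b, int *re, int *im)
{
    *re = Q6_R_mpynac_RhRh(Q6_R_mpy_RlRl(a, b), a, b);   /* l*l - h*h */
    *im = Q6_R_mpyacc_RhRl(Q6_R_mpy_RlRh(a, b), a, b);   /* l*h + h*l */
}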
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1
-
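/* Sketch: a rounded, saturating Q15 multiply via the :rnd:sat form above.
   The intrinsic returns the doubled, rounded, saturated 32-bit product of
   the low halfwords; taking the upper halfword of that result as the Q15
   value is this example's convention, not part of the intrinsic. */
#include <hexagon_protos.h>

static short q15_mul(short a, short b)
{
    return (short)(Q6_R_mpy_RlRl_s1_rnd_sat(a, b) >> 16);
}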
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat
-
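/* Sketch: the "up" forms above return the upper word of the full 64-bit
   product, so the <<1:sat variant behaves as the usual Q31 x Q31 -> Q31
   fractional multiply.  Illustration only. */
#include <hexagon_protos.h>

static int q31_mul(int a, int b)
{
    return Q6_R_mpy_RR_s1_sat(a, b);
}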
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh __builtin_HEXAGON_M2_mpyd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1
-
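/* Sketch: an overflow-safe dot product using the widening forms above,
   which produce and accumulate full 64-bit products rather than 32-bit
   ones.  Assumes Word64 == long long. */
#include <hexagon_protos.h>

static long long dot_product_wide(const short *x, const short *y, int n)
{
    long long acc = 0;
    for (int i = 0; i < n; ++i)
        acc = Q6_P_mpyacc_RlRl(acc, x[i], y[i]);   /* 64-bit acc += x[i]*y[i] */
    return acc;
}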
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpy(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd
-   C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyi(Rs32,#m9)
-   C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpysu(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up
-
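/* Sketch: multiplying by a small constant through the immediate form.
   #m9 is a signed 9-bit immediate (about -256..255), so the factor must be
   a compile-time constant in that range; larger factors take the register
   form Q6_R_mpyi_RR instead. */
#include <hexagon_protos.h>

static int scale_by_100(int x)
{
    return Q6_R_mpyi_RI(x, 100);   /* constant fits in #m9 */
}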
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyu(Rs32,Rt32)
-   C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up
-
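The Q6_R_mpyu_* halfword forms above multiply the selected unsigned 16-bit halves of the two sources into a 32-bit result. As an orientation aid only, here is a plain-C sketch (hypothetical model_* helpers, not part of this header) of the assumed semantics, plus the schoolbook composition this family enables:

#include <stdint.h>

/* Hedged models of mpyu(Rs32.l,Rt32.l) and friends: unsigned 16x16 -> 32
   multiplies of the selected halfwords, per the syntax comments above. */
static inline uint32_t model_mpyu_ll(uint32_t rs, uint32_t rt) { return (rs & 0xFFFFu) * (rt & 0xFFFFu); }
static inline uint32_t model_mpyu_lh(uint32_t rs, uint32_t rt) { return (rs & 0xFFFFu) * (rt >> 16); }
static inline uint32_t model_mpyu_hl(uint32_t rs, uint32_t rt) { return (rs >> 16) * (rt & 0xFFFFu); }
static inline uint32_t model_mpyu_hh(uint32_t rs, uint32_t rt) { return (rs >> 16) * (rt >> 16); }

/* The four partial products rebuild the full 64-bit unsigned product,
   which is the usual way these primitives are composed. */
static inline uint64_t model_mul32x32_u(uint32_t a, uint32_t b) {
    uint64_t ll = model_mpyu_ll(a, b);
    uint64_t lh = model_mpyu_lh(a, b);
    uint64_t hl = model_mpyu_hl(a, b);
    uint64_t hh = model_mpyu_hh(a, b);
    return ll + ((lh + hl) << 16) + (hh << 32);
}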
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.h,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyud_nac_hl_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.h)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.h):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.l)
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=mpyu(Rs32.l,Rt32.l):<<1
-   C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mpyui(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=add(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=add(Rs32,#s8)
-   C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=sub(Rt32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc
-
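A naming note that holds throughout this header: acc variants compute Rx += op(...), nac variants Rx -= op(...), and the accumulator is the first prototype argument with the updated value returned, so the idiomatic use is x = Q6_R_addnac_RR(x, a, b). A minimal plain-C model of the three forms just listed (illustrative names, two's-complement wraparound assumed):

#include <stdint.h>

static inline int32_t model_addnac_rr(int32_t rx, int32_t rs, int32_t rt) { return rx - (rs + rt); } /* Rx32-=add(Rs32,Rt32) */
static inline int32_t model_addnac_ri(int32_t rx, int32_t rs, int32_t s8) { return rx - (rs + s8); } /* Rx32-=add(Rs32,#s8)  */
static inline int32_t model_subacc_rr(int32_t rx, int32_t rt, int32_t rs) { return rx + (rt - rs); } /* Rx32+=sub(Rt32,Rs32) */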
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffw(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw
-
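vabsdiffh and vabsdiffw treat the two 64-bit pairs as short vectors (four halfwords or two words) and take element-wise absolute differences. A hedged plain-C model of the halfword case (lane order assumed low-to-high):

#include <stdint.h>
#include <stdlib.h>

/* Model of Rdd32=vabsdiffh(Rtt32,Rss32): per-halfword |t - s|. */
static inline uint64_t model_vabsdiffh(uint64_t rtt, uint64_t rss) {
    uint64_t out = 0;
    for (int i = 0; i < 4; i++) {
        int32_t t = (int16_t)(rtt >> (16 * i));
        int32_t s = (int16_t)(rss >> (16 * i));
        out |= (uint64_t)(uint16_t)abs(t - s) << (16 * i);
    }
    return out;
}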
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vcmpyi(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vcmpyr(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyi(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyr(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpy(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vdmpy(Rss32,Rtt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpy(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpy(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1
-
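The :<<1, :rnd, and :sat suffixes that recur through the vdmpy/vcmpy families follow Hexagon's fixed-point conventions: <<1 doubles the raw product (fractional scaling), :rnd adds a rounding bias before truncation, and :sat clamps to the destination width. The exact bias and shift positions vary per instruction; the sketch below is purely illustrative of the pattern, not of any one opcode:

#include <stdint.h>

/* Illustrative narrowing of a 64-bit intermediate with the three
   modifiers applied; the 16-bit rounding point is an assumption. */
static inline int32_t model_s1_rnd_sat(int64_t product) {
    int64_t v = 2 * product;            /* :<<1 */
    v += (int64_t)1 << 15;              /* :rnd */
    v >>= 16;                           /* keep the high bits */
    if (v > INT32_MAX) v = INT32_MAX;   /* :sat */
    if (v < INT32_MIN) v = INT32_MIN;
    return (int32_t)v;
}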
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyh(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyhsu(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyeh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyh(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vmpyh(Rs32,Rt32):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyh(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyhsu(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vraddh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vradduh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyi(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyi(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyr(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpyr(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyi(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyi(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyr(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpyr(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcmpys(Rss32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
-
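vrmpyh is a vector reduce multiply: the four signed halfword lanes of Rss and Rtt are multiplied pairwise and summed into one 64-bit scalar (the acc form adds the sum to Rxx), i.e. a 4-lane dot product. A plain-C model under that reading:

#include <stdint.h>

/* Model of Rdd32=vrmpyh(Rss32,Rtt32): signed halfword dot product. */
static inline int64_t model_vrmpyh(uint64_t rss, uint64_t rtt) {
    int64_t sum = 0;
    for (int i = 0; i < 4; i++) {
        int32_t a = (int16_t)(rss >> (16 * i));
        int32_t b = (int16_t)(rtt >> (16 * i));
        sum += (int64_t)a * b;
    }
    return sum;
}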
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor
-
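In these M4 "logical-logical" forms the Q6 macro name reads inner op first, accumulator op second: andand is Rx &= and(Rs,Rt), orand is Rx &= or(Rs,Rt), while andor (further below) is Rx |= and(Rs,Rt). Modeled directly from the syntax comments:

#include <stdint.h>

static inline uint32_t model_andand_rr (uint32_t rx, uint32_t rs, uint32_t rt) { return rx & (rs & rt);  } /* Rx32&=and(Rs32,Rt32)  */
static inline uint32_t model_andand_rnr(uint32_t rx, uint32_t rs, uint32_t rt) { return rx & (rs & ~rt); } /* Rx32&=and(Rs32,~Rt32) */
static inline uint32_t model_orand_rr  (uint32_t rx, uint32_t rs, uint32_t rt) { return rx & (rs | rt);  } /* Rx32&=or(Rs32,Rt32)   */
static inline uint32_t model_xorand_rr (uint32_t rx, uint32_t rs, uint32_t rt) { return rx & (rs ^ rt);  } /* Rx32&=xor(Rs32,Rt32)  */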
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(#u6,mpyi(Rs32,#U6))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Ru32,mpyi(Rs32,#u6))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Ru32,mpyi(#u6:2,Rs32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(#u6,mpyi(Rs32,Rt32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi
-
-/* ==========================================================================
-   Assembly Syntax:       Ry32=add(Ru32,mpyi(Ry32,Rs32))
-   C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpy(Rs32,Rt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=xor(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=pmpyw(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=pmpyw(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc
-
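pmpyw is a polynomial (carry-less) multiply over GF(2): partial products combine with XOR rather than addition, the primitive behind CRC and GHASH-style code; the ^= form folds the product into Rxx with XOR. A plain-C model:

#include <stdint.h>

/* Model of Rdd32=pmpyw(Rs32,Rt32): 32x32 -> 64 carry-less multiply. */
static inline uint64_t model_pmpyw(uint32_t rs, uint32_t rt) {
    uint64_t acc = 0;
    for (int i = 0; i < 32; i++)
        if (rt & (1u << i))
            acc ^= (uint64_t)rs << i;
    return acc;
}

/* pmpywxacc then reduces to: rxx ^ model_pmpyw(rs, rt). */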
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vpmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=vpmpyh(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyweh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpyweh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyweh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpyweh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpywoh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpywoh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpywoh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpywoh(Rss32,Rtt32):<<1
-   C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=and(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=and(Rs32,~Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andxacc_RnR __builtin_HEXAGON_M4_xor_andn
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=or(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=xor(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpybsu(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpybsu(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpybsu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vmpybu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpybsu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vmpybu(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpybsu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrmpybu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpybsu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrmpybu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu
-
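/* A sketch of the widening byte multiplies above: vmpybu produces four
   unsigned 8x8->16 products per call, and the accumulating form adds them to
   the halfword lanes of a 64-bit accumulator. Hexagon-target assumptions as
   elsewhere; mac4_bytes is an illustrative name. */
static Word64 mac4_bytes(Word64 acc, Word32 a, Word32 b)
{
    /* acc.h[i] += a.b[i] * b.b[i] for each of the four byte lanes */
    return Q6_P_vmpybuacc_RR(acc, a, b);
}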
-/* ==========================================================================
-   Assembly Syntax:       Rd32=addasl(Rt32,Rs32,#u3)
-   C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asl(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc
-
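/* The asl-immediate forms above fuse a 64-bit shift with an accumulate or a
   logical combine. A sketch (illustrative name; the #u6 operand must be a
   compile-time constant in 0..63): */
static Word64 place_field(Word64 word, Word64 field)
{
    /* word |= field << 20 in a single instruction */
    return Q6_P_aslor_PI(word, field, 20);
}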
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,#u5):sat
-   C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=asl(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc
-
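/* A sketch of the saturating 32-bit shift above, e.g. as a cheap gain stage
   that clamps instead of wrapping on overflow (illustrative name, Hexagon
   target assumed): */
static Word32 gain_x8_sat(Word32 sample)
{
    /* sample << 3 with signed saturation to the 32-bit range */
    return Q6_R_asl_RI_sat(sample, 3);
}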
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw
-
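/* The vaslh/vaslw forms shift each 16- or 32-bit lane of a 64-bit pair
   independently. A sketch (illustrative name): */
static Word64 scale_4xh(Word64 quad)
{
    /* shift all four halfword lanes left by 2 (#u4 immediate) */
    return Q6_P_vaslh_PI(quad, 2);
}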
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RR __builtin_HEXAGON_S2_asl_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asl(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vaslw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,#u6):rnd
-   C Intrinsic Prototype: Word64 Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asrrnd(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax
-
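/* The :rnd form above rounds instead of truncating: the last bit shifted out
   is added back into the result. A sketch of a rounded divide by a power of
   two (illustrative name): */
static Word64 div16_rounded(Word64 x)
{
    return Q6_P_asr_PI_rnd(x, 4); /* (x >> 4) with rounding */
}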
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,#u5):rnd
-   C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asrrnd(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrw(Rss32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw
-
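/* vasrw with the scalar Rd32 destination shifts both word lanes and packs
   the low 16 bits of each result, which makes it a one-instruction 32->16
   narrowing step. A sketch (illustrative name): */
static Word32 narrow_2xw(Word64 pair)
{
    /* shift each 32-bit lane right by 8, keep 16 bits of each, pack */
    return Q6_R_vasrw_PI(pair, 8);
}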
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=asr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=asr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=asr(Rs32,Rt32):sat
-   C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=brev(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_brev_R __builtin_HEXAGON_S2_brev
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=brev(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl0(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl0(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl1(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cl1(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clb_R __builtin_HEXAGON_S2_clb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=normamt(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp
-
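/* The count-leading-bits forms above make bit scans branch-free. A sketch
   using cl0 (illustrative name; x must be nonzero): */
static Word32 ilog2(Word32 x)
{
    /* floor(log2(x)) = 31 - number of leading zero bits */
    return 31 - Q6_R_cl0_R(x);
}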
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clrbit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clrbit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct0(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct0(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct1(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=ct1(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=deinterleave(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extractu(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extractu(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extractu(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extractu(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=insert(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=insert(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=insert(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32=insert(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp
-
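/* extractu/insert above implement unsigned bit-field reads and writes with an
   immediate width and offset. A sketch that swaps the two low nibbles
   (illustrative name): */
static Word32 swap_low_nibbles(Word32 x)
{
    Word32 lo = Q6_R_extractu_RII(x, 4, 0); /* 4 bits at offset 0 */
    Word32 hi = Q6_R_extractu_RII(x, 4, 4); /* 4 bits at offset 4 */
    x = Q6_R_insert_RII(x, hi, 4, 0);       /* hi -> bits 3:0 */
    return Q6_R_insert_RII(x, lo, 4, 4);    /* lo -> bits 7:4 */
}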
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=interleave(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lfs(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsl(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsl(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlslh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlslw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsr(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=lsr(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc
-
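/* As with the asl/asr families, the lsr forms fuse a logical shift with an
   accumulate or logical op. A sketch folding the high half of a word into a
   running hash (illustrative name): */
static Word32 fold_hash(Word32 h, Word32 x)
{
    return Q6_R_lsrxacc_RI(h, x, 16); /* h ^= (x >> 16), logical shift */
}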
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrh(Rss32,#u4)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrw(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=lsr(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=lsr(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vlsrw(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=packhl(Rs32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU32_3op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=parity(Rss32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=setbit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=setbit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffeb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffeh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffob(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=shuffoh(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh
-
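-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   shuffeb/shuffob interleave the even- and odd-indexed bytes of two
-   64-bit pairs; note that the shuffob wrapper takes its operands in
-   Rtt,Rss order, mirroring the assembly syntax. */
-static inline void split_even_odd_bytes(Word64 a, Word64 b,
-                                        Word64 *even, Word64 *odd)
-{
-    *even = Q6_P_shuffeb_PP(a, b); /* even bytes of a and b, interleaved */
-    *odd  = Q6_P_shuffob_PP(b, a); /* odd bytes, same a/b pairing */
-}
-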
-/* ==========================================================================
-   Assembly Syntax:       memb(Rx32++#s4:0:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memb(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memd(Rx32++#s4:3:circ(Mu2))=Rtt32
-   C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memd(Rx32++I:circ(Mu2))=Rtt32
-   C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++#s4:1:circ(Mu2))=Rt32.h
-   C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++I:circ(Mu2))=Rt32.h
-   C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++#s4:1:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memh(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr
-
-/* ==========================================================================
-   Assembly Syntax:       memw(Rx32++#s4:2:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci
-
-/* ==========================================================================
-   Assembly Syntax:       memw(Rx32++I:circ(Mu2))=Rt32
-   C Intrinsic Prototype: void Q6_memw_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
-   Instruction Type:      ST
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr
-
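-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   The circular-store wrappers advance a cursor through a ring buffer:
-   Rx is the cursor (updated through the pointer), Mu must already hold
-   a circular addressing modifier describing the buffer length (see the
-   Hexagon programmer's reference), and BaseAddress is the buffer start.
-   The step below assumes the Is4_2 immediate is given in bytes. */
-static inline void push_word_circ(void **cursor, Word32 modifier,
-                                  Word32 value, void *base)
-{
-    Q6_memw_IMR_circ(cursor, 4, modifier, value, base); /* one-word step */
-}
-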
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathub(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxb(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxd(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxh(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=tableidxw(Rs32,#u4,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=togglebit(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=togglebit(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tstbit(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=tstbit(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r
-
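-/* Editor's note: hypothetical sketch, not part of the upstream header,
-   combining the single-bit wrappers above; predicate results are
-   returned as a Byte. */
-static inline Word32 bit5_roundtrip(Word32 v)
-{
-    Word32 s = Q6_R_setbit_RI(v, 5);    /* v | (1u << 5)            */
-    Word32 t = Q6_R_togglebit_RI(s, 5); /* s ^ (1u << 5), back to v */
-    return Q6_p_tstbit_RI(t, 5) ? 1 : 0;
-}
-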
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=valignb(Rtt32,Rss32,#u3)
-   C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=valignb(Rtt32,Rss32,Pu4)
-   C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb
-
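-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   valignb concatenates two 64-bit values and extracts an unaligned
-   64-bit window, shifted right by a constant number of bytes. */
-static inline Word64 align_by_three_bytes(Word64 hi, Word64 lo)
-{
-    return Q6_P_valignb_PPI(hi, lo, 3); /* bytes 3..10 of the lo:hi pair */
-}
-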
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcnegh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vcrotate(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcnegh(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrndwh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vrndwh(Rss32):sat
-   C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsathb(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsathub(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsathub(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsatwh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsatwh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsatwuh(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vsatwuh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsatwuh(Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack
-
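-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   Q6_R_vsatwh_P saturates each 32-bit word of a pair to 16 bits and
-   packs the two results into a single 32-bit register. */
-static inline Word32 pack_two_words_sat16(Word64 pair)
-{
-    return Q6_R_vsatwh_P(pair); /* {sat_16(hi word), sat_16(lo word)} */
-}
-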
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vsplatb(Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vsplatb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsplath(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh
-
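-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   vsplatb/vsplath broadcast the low byte or halfword across all lanes. */
-static inline Word64 splat_low_half(Word32 v)
-{
-    Word32 b4 = Q6_R_vsplatb_R(v); /* low byte copied to all 4 byte lanes */
-    return Q6_P_vsplath_R(b4);     /* low halfword copied to all 4 lanes  */
-}
-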
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vspliceb(Rss32,Rtt32,#u3)
-   C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vspliceb(Rss32,Rtt32,Pu4)
-   C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsxtbh(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsxthw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vtrunehb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunewh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vtrunohb(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunowh(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunowh_PP __builtin_HEXAGON_S2_vtrunowh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vzxtbh(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vzxtbh_R __builtin_HEXAGON_S2_vzxtbh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vzxthw(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw
-
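-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   The vsxt/vzxt wrappers widen four packed bytes into four halfwords. */
-static inline void widen_bytes(Word32 packed, Word64 *s, Word64 *u)
-{
-    *s = Q6_P_vsxtbh_R(packed); /* sign-extend each byte to 16 bits */
-    *u = Q6_P_vzxtbh_R(packed); /* zero-extend each byte to 16 bits */
-}
-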
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,add(Ru32,#s6))
-   C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_add_RRI __builtin_HEXAGON_S4_addaddi
-
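-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   addaddi folds a register add and a small constant into one operation. */
-static inline Word32 add_plus_bias(Word32 a, Word32 b)
-{
-    return Q6_R_add_add_RRI(a, b, 12); /* a + (b + 12) */
-}
-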
-/* ==========================================================================
-   Assembly Syntax:       Rx32=add(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=add(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=and(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_and_asl_IRI __builtin_HEXAGON_S4_andi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=and(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(clb(Rs32),#s6)
-   C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(clb(Rss32),#s6)
-   C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=normamt(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extract(Rs32,#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=extract(Rs32,Rtt32)
-   C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extract(Rss32,#u6,#U6)
-   C Intrinsic Prototype: Word64 Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=extract(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp
-
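-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   extract(Rs,#width,#offset) pulls out a sign-extended bitfield. */
-static inline Word32 signed_field_8_at_4(Word32 v)
-{
-    return Q6_R_extract_RII(v, 8, 4); /* bits 4..11, sign-extended */
-}
-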
-/* ==========================================================================
-   Assembly Syntax:       Rd32=lsl(#s6,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!tstbit(Rs32,#u5)
-   C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i
-
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!tstbit(Rs32,Rt32)
-   C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=and(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(Ru32,and(Rx32,#s10))
-   C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=or(Rs32,#s10)
-   C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=or(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_or_lsr_IRI __builtin_HEXAGON_S4_ori_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=parity(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=add(Rs32,sub(#s6,Ru32))
-   C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=sub(#u8,asl(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rx32=sub(#u8,lsr(Rx32,#U5))
-   C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vrcrotate(Rss32,Rt32,#u2)
-   C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate
-
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vrcrotate(Rss32,Rt32,#u2)
-   C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxaddsubw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddh(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr
-
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vxsubaddw(Rss32,Rtt32):sat
-   C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrhub(Rss32,#u4):rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vasrhub(Rss32,#u4):sat
-   C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat
-
-/* ==========================================================================
-   Assembly Syntax:       Rd32=popcount(Rss32)
-   C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp
-
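-/* Editor's note: hypothetical sketch, not part of the upstream header. */
-static inline Word32 count_bits64(Word64 v)
-{
-    return Q6_R_popcount_P(v); /* set bits across the whole 64-bit pair */
-}
-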
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vasrh(Rss32,#u4):rnd
-   C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax
-
-/* ==========================================================================
-   Assembly Syntax:       dccleana(Rs32)
-   C Intrinsic Prototype: void Q6_dccleana_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana
-
-/* ==========================================================================
-   Assembly Syntax:       dccleaninva(Rs32)
-   C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva
-
-/* ==========================================================================
-   Assembly Syntax:       dcfetch(Rs32)
-   C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch
-
-/* ==========================================================================
-   Assembly Syntax:       dcinva(Rs32)
-   C Intrinsic Prototype: void Q6_dcinva_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva
-
-/* ==========================================================================
-   Assembly Syntax:       dczeroa(Rs32)
-   C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa
-
-/* ==========================================================================
-   Assembly Syntax:       l2fetch(Rs32,Rt32)
-   C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch
-
-/* ==========================================================================
-   Assembly Syntax:       l2fetch(Rs32,Rtt32)
-   C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch
-
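-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   Q6_dcfetch_A issues a non-faulting L1 hint and Q6_l2fetch_AR starts
-   the L2 prefetch engine; the Rt operand packs width/height/stride as
-   described in the Hexagon programmer's reference, so `box` below is a
-   placeholder value, and Address is assumed to be an integer type wide
-   enough to hold a pointer. */
-static inline void prefetch_block(const void *p, Word32 box)
-{
-    Q6_dcfetch_A((Address)p);       /* L1 data-cache touch   */
-    Q6_l2fetch_AR((Address)p, box); /* programmed L2 prefetch */
-}
-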
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
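-/* Editor's note: hypothetical sketch, not part of the upstream header;
-   user code guards rotate users the same way as the wrappers here. */
-#if __HEXAGON_ARCH__ >= 60
-static inline Word64 rotl64_by8(Word64 v)
-{
-    return Q6_P_rol_PI(v, 8); /* rotate the 64-bit pair left by 8 bits */
-}
-#endif
-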
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32&=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32-=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32|=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rxx32^=rol(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rd32=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32+=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32&=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32|=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rx32^=rol(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc
-#endif /* __HEXAGON_ARCH__ >= 60 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffb(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vabsdiffub(Rtt32,Rss32)
-   C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vsplatb(Rs32)
-   C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunehb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vtrunohb(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH__ >= 62 */
-
-#if __HEXAGON_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
-   C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      ALU64
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany
-#endif /* __HEXAGON_ARCH__ >= 65 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfadd(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfsub(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
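-/* Editor's note: hypothetical sketch, not part of the upstream header.
-   From v66 on, double-precision add and subtract are single
-   instructions; Float64 is assumed to be the header's IEEE double. */
-#if __HEXAGON_ARCH__ >= 66
-static inline Float64 sum_and_diff(Float64 a, Float64 b, Float64 *diff)
-{
-    *diff = Q6_P_dfsub_PP(a, b);
-    return Q6_P_dfadd_PP(a, b);
-}
-#endif
-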
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rx32-=mpyi(Rs32,Rt32)
-   C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Rd32=mask(#u5,#U5)
-   C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_mask_II __builtin_HEXAGON_S2_mask
-#endif /* __HEXAGON_ARCH__ >= 66 */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=clip(Rs32,#u5)
-   C Intrinsic Prototype: Word32 Q6_R_clip_RI(Word32 Rs, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cround(Rss32,#u6)
-   C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cround(Rss32,Rt32)
-   C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt)
-   Instruction Type:      S_3op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vclip(Rss32,#u5)
-   C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5)
-   Instruction Type:      S_2op
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmax(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmin(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmpyfix(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=dfmpyhh(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyhhacc_PP __builtin_HEXAGON_F2_dfmpyhh
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=dfmpylh(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=dfmpyll(Rss32,Rtt32)
-   C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll
-#endif /* __HEXAGON_ARCH__ >= 67 */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyiw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyiw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyiw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyiw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyrw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyrw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=cmpyrw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=cmpyrw(Rss32,Rtt32*)
-   C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
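The rw/iw pairs above compute the two halves of a full product of packed 32-bit complex values, and the acc forms fold in a running sum for FIR-style loops. A sketch, assuming the conventional packing of the real part in the low word and the imaginary part in the high word of each Word64 (z0 and z1 are hypothetical packed operands):

    Word64 acc_re = 0, acc_im = 0;               /* running sums for one tap */
    acc_re = Q6_P_cmpyrwacc_PP(acc_re, z0, z1);  /* += Re(z0 * z1) */
    acc_im = Q6_P_cmpyiwacc_PP(acc_im, z0, z1);  /* += Im(z0 * z1) */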
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rdd32=vdmpyw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rxx32+=vdmpyw(Rss32,Rtt32)
-   C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
-
-#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
-/* ==========================================================================
-   Assembly Syntax:       Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat
-   C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
-   Instruction Type:      M
-   Execution Slots:       SLOT3
-   ========================================================================== */
-
-#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd
-#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
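Unlike the 64-bit Rdd32 forms above, these Rd32 variants fold the <<1 scaling, optional rounding, and saturation into the instruction itself, which is usually what a Q31 fixed-point kernel wants. A sketch under the same packing assumption:

    Word32 re = Q6_R_cmpyrw_PP_s1_rnd_sat(z0, z1);  /* rounded, saturated Re */
    Word32 im = Q6_R_cmpyiw_PP_s1_rnd_sat(z0, z1);  /* rounded, saturated Im */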
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmlink(Rs32,Rt32)
-   C Intrinsic Prototype: void Q6_dmlink_AA(Address Rs, Address Rt)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmpause
-   C Intrinsic Prototype: Word32 Q6_R_dmpause()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmpoll
-   C Intrinsic Prototype: Word32 Q6_R_dmpoll()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmresume(Rs32)
-   C Intrinsic Prototype: void Q6_dmresume_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       dmstart(Rs32)
-   C Intrinsic Prototype: void Q6_dmstart_A(Address Rs)
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart
-#endif /* __HEXAGON_ARCH__ >= 68 */
-
-#if __HEXAGON_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Rd32=dmwait
-   C Intrinsic Prototype: Word32 Q6_R_dmwait()
-   Instruction Type:      ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait
-#endif /* __HEXAGON_ARCH__ >= 68 */
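The six Y6 intrinsics above drive the user-mode DMA engine introduced in v68: dmlink chains descriptors, dmstart hands a chain to the engine, dmpoll and dmwait read its status without and with stalling, and dmpause/dmresume suspend and restart it. A minimal kick-off/wait sketch (desc is a hypothetical, already-prepared descriptor chain; its layout is defined by the DMA specification, not by this header):

    Q6_dmstart_A(desc);             /* start processing the descriptor chain */
    Word32 status = Q6_R_dmwait();  /* stall until the engine goes idle */
    /* nonzero status fields report errors per the DMA specification */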
-
-#include <hexagon_circ_brev_intrinsics.h>
-#ifdef __HVX__
-#include <hvx_hexagon_protos.h>
-#endif /* __HVX__ */
-#endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h b/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h
deleted file mode 100644
index 6958809..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ /dev/null
@@ -1,2653 +0,0 @@
-/******************************************************************************/
-/*   (c) 2020 Qualcomm Innovation Center, Inc. All rights reserved.           */
-/*                                                                            */
-/******************************************************************************/
-#ifndef HEXAGON_TYPES_H
-#define HEXAGON_TYPES_H
-
-#include <hexagon_protos.h>
-
-/* Hexagon names */
-#define HEXAGON_Vect HEXAGON_Vect64
-#define HEXAGON_V_GET_D HEXAGON_V64_GET_D
-#define HEXAGON_V_GET_UD HEXAGON_V64_GET_UD
-#define HEXAGON_V_GET_W0 HEXAGON_V64_GET_W0
-#define HEXAGON_V_GET_W1 HEXAGON_V64_GET_W1
-#define HEXAGON_V_GET_UW0 HEXAGON_V64_GET_UW0
-#define HEXAGON_V_GET_UW1 HEXAGON_V64_GET_UW1
-#define HEXAGON_V_GET_H0 HEXAGON_V64_GET_H0
-#define HEXAGON_V_GET_H1 HEXAGON_V64_GET_H1
-#define HEXAGON_V_GET_H2 HEXAGON_V64_GET_H2
-#define HEXAGON_V_GET_H3 HEXAGON_V64_GET_H3
-#define HEXAGON_V_GET_UH0 HEXAGON_V64_GET_UH0
-#define HEXAGON_V_GET_UH1 HEXAGON_V64_GET_UH1
-#define HEXAGON_V_GET_UH2 HEXAGON_V64_GET_UH2
-#define HEXAGON_V_GET_UH3 HEXAGON_V64_GET_UH3
-#define HEXAGON_V_GET_B0 HEXAGON_V64_GET_B0
-#define HEXAGON_V_GET_B1 HEXAGON_V64_GET_B1
-#define HEXAGON_V_GET_B2 HEXAGON_V64_GET_B2
-#define HEXAGON_V_GET_B3 HEXAGON_V64_GET_B3
-#define HEXAGON_V_GET_B4 HEXAGON_V64_GET_B4
-#define HEXAGON_V_GET_B5 HEXAGON_V64_GET_B5
-#define HEXAGON_V_GET_B6 HEXAGON_V64_GET_B6
-#define HEXAGON_V_GET_B7 HEXAGON_V64_GET_B7
-#define HEXAGON_V_GET_UB0 HEXAGON_V64_GET_UB0
-#define HEXAGON_V_GET_UB1 HEXAGON_V64_GET_UB1
-#define HEXAGON_V_GET_UB2 HEXAGON_V64_GET_UB2
-#define HEXAGON_V_GET_UB3 HEXAGON_V64_GET_UB3
-#define HEXAGON_V_GET_UB4 HEXAGON_V64_GET_UB4
-#define HEXAGON_V_GET_UB5 HEXAGON_V64_GET_UB5
-#define HEXAGON_V_GET_UB6 HEXAGON_V64_GET_UB6
-#define HEXAGON_V_GET_UB7 HEXAGON_V64_GET_UB7
-#define HEXAGON_V_PUT_D HEXAGON_V64_PUT_D
-#define HEXAGON_V_PUT_W0 HEXAGON_V64_PUT_W0
-#define HEXAGON_V_PUT_W1 HEXAGON_V64_PUT_W1
-#define HEXAGON_V_PUT_H0 HEXAGON_V64_PUT_H0
-#define HEXAGON_V_PUT_H1 HEXAGON_V64_PUT_H1
-#define HEXAGON_V_PUT_H2 HEXAGON_V64_PUT_H2
-#define HEXAGON_V_PUT_H3 HEXAGON_V64_PUT_H3
-#define HEXAGON_V_PUT_B0 HEXAGON_V64_PUT_B0
-#define HEXAGON_V_PUT_B1 HEXAGON_V64_PUT_B1
-#define HEXAGON_V_PUT_B2 HEXAGON_V64_PUT_B2
-#define HEXAGON_V_PUT_B3 HEXAGON_V64_PUT_B3
-#define HEXAGON_V_PUT_B4 HEXAGON_V64_PUT_B4
-#define HEXAGON_V_PUT_B5 HEXAGON_V64_PUT_B5
-#define HEXAGON_V_PUT_B6 HEXAGON_V64_PUT_B6
-#define HEXAGON_V_PUT_B7 HEXAGON_V64_PUT_B7
-#define HEXAGON_V_CREATE_D HEXAGON_V64_CREATE_D
-#define HEXAGON_V_CREATE_W HEXAGON_V64_CREATE_W
-#define HEXAGON_V_CREATE_H HEXAGON_V64_CREATE_H
-#define HEXAGON_V_CREATE_B HEXAGON_V64_CREATE_B
-
-#ifdef __cplusplus
-#define HEXAGON_VectC HEXAGON_Vect64C
-#endif /* __cplusplus */
-
-/* 64 Bit Vectors */
-
-typedef long long __attribute__((__may_alias__)) HEXAGON_Vect64;
-
-/* Extract doubleword macros */
-
-#define HEXAGON_V64_GET_D(v) (v)
-#define HEXAGON_V64_GET_UD(v) ((unsigned long long)(v))
-
-/* Extract word macros */
-
-#define HEXAGON_V64_GET_W0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[0];                                                \
-  })
-#define HEXAGON_V64_GET_W1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[1];                                                \
-  })
-#define HEXAGON_V64_GET_UW0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uw[0];                                               \
-  })
-#define HEXAGON_V64_GET_UW1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uw[1];                                               \
-  })
-
-/* Extract half word macros */
-
-#define HEXAGON_V64_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[0];                                                \
-  })
-#define HEXAGON_V64_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[1];                                                \
-  })
-#define HEXAGON_V64_GET_H2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[2];                                                \
-  })
-#define HEXAGON_V64_GET_H3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[3];                                                \
-  })
-#define HEXAGON_V64_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[0];                                               \
-  })
-#define HEXAGON_V64_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[1];                                               \
-  })
-#define HEXAGON_V64_GET_UH2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[2];                                               \
-  })
-#define HEXAGON_V64_GET_UH3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.uh[3];                                               \
-  })
-
-/* Extract byte macros */
-
-#define HEXAGON_V64_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[0];                                                \
-  })
-#define HEXAGON_V64_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[1];                                                \
-  })
-#define HEXAGON_V64_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[2];                                                \
-  })
-#define HEXAGON_V64_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[3];                                                \
-  })
-#define HEXAGON_V64_GET_B4(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[4];                                                \
-  })
-#define HEXAGON_V64_GET_B5(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[5];                                                \
-  })
-#define HEXAGON_V64_GET_B6(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[6];                                                \
-  })
-#define HEXAGON_V64_GET_B7(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[7];                                                \
-  })
-#define HEXAGON_V64_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[0];                                               \
-  })
-#define HEXAGON_V64_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[1];                                               \
-  })
-#define HEXAGON_V64_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[2];                                               \
-  })
-#define HEXAGON_V64_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[3];                                               \
-  })
-#define HEXAGON_V64_GET_UB4(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[4];                                               \
-  })
-#define HEXAGON_V64_GET_UB5(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[5];                                               \
-  })
-#define HEXAGON_V64_GET_UB6(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[6];                                               \
-  })
-#define HEXAGON_V64_GET_UB7(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.ub[7];                                               \
-  })
-
-/* NOTE: All set macros return a HEXAGON_Vect64 type */
-
-/* Set doubleword macro */
-
-#define HEXAGON_V64_PUT_D(v, new) (new)
-
-/* Set word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_W0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_W1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.w[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_W0(v, new)                                                   \
-  (((v) & 0xffffffff00000000LL) | ((HEXAGON_Vect64)((unsigned int)(new))))
-#define HEXAGON_V64_PUT_W1(v, new)                                                   \
-  (((v) & 0x00000000ffffffffLL) | (((HEXAGON_Vect64)(new)) << 32LL))
-
-#endif /* !__hexagon__ */
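Both variants are pure: they return the updated doubleword rather than writing through v, so the result must be assigned. For example:

    HEXAGON_Vect64 v = 0;
    v = HEXAGON_V64_PUT_W1(v, 7);  /* v == 0x0000000700000000LL */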
-
-/* Set half word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[2] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_H3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.h[3] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_H0(v, new)                                                   \
-  (((v) & 0xffffffffffff0000LL) | ((HEXAGON_Vect64)((unsigned short)(new))))
-#define HEXAGON_V64_PUT_H1(v, new)                                                   \
-  (((v) & 0xffffffff0000ffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 16LL))
-#define HEXAGON_V64_PUT_H2(v, new)                                                   \
-  (((v) & 0xffff0000ffffffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 32LL))
-#define HEXAGON_V64_PUT_H3(v, new)                                                   \
-  (((v) & 0x0000ffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 48LL))
-
-#endif /* !__hexagon__ */
-
-/* Set byte macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[0] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[1] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[2] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[3] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B4(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[4] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B5(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[5] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B6(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[6] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-#define HEXAGON_V64_PUT_B7(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.d = (v);                                             \
-    _HEXAGON_V64_internal_union.b[7] = (new);                                        \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffffffffffff00LL) | ((HEXAGON_Vect64)((unsigned char)(new))))
-#define HEXAGON_V64_PUT_B1(v, new)                                                   \
-  (((v) & 0xffffffffffff00ffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 8LL))
-#define HEXAGON_V64_PUT_B2(v, new)                                                   \
-  (((v) & 0xffffffffff00ffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 16LL))
-#define HEXAGON_V64_PUT_B3(v, new)                                                   \
-  (((v) & 0xffffffff00ffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 24LL))
-#define HEXAGON_V64_PUT_B4(v, new)                                                   \
-  (((v) & 0xffffff00ffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 32LL))
-#define HEXAGON_V64_PUT_B5(v, new)                                                   \
-  (((v) & 0xffff00ffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 40LL))
-#define HEXAGON_V64_PUT_B6(v, new)                                                   \
-  (((v) & 0xff00ffffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 48LL))
-#define HEXAGON_V64_PUT_B7(v, new)                                                   \
-  (((v) & 0x00ffffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 56LL))
-
-#endif /* !__hexagon__ */
-
-/* NOTE: All create macros return a HEXAGON_Vect64 type */
-
-/* Create from a doubleword */
-
-#define HEXAGON_V64_CREATE_D(d) (d)
-
-/* Create from words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_W(w1, w0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.w[0] = (w0);                                         \
-    _HEXAGON_V64_internal_union.w[1] = (w1);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_W(w1, w0)                                                 \
-  ((((HEXAGON_Vect64)(w1)) << 32LL) | ((HEXAGON_Vect64)((w0) & 0xffffffff)))
-
-#endif /* !__hexagon__ */
-
-/* Create from half words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.h[0] = (h0);                                         \
-    _HEXAGON_V64_internal_union.h[1] = (h1);                                         \
-    _HEXAGON_V64_internal_union.h[2] = (h2);                                         \
-    _HEXAGON_V64_internal_union.h[3] = (h3);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0)                                         \
-  ((((HEXAGON_Vect64)(h3)) << 48LL) | (((HEXAGON_Vect64)((h2) & 0xffff)) << 32LL) |        \
-   (((HEXAGON_Vect64)((h1) & 0xffff)) << 16LL) | ((HEXAGON_Vect64)((h0) & 0xffff)))
-
-#endif /* !__hexagon__ */
-
-/* Create from bytes */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _HEXAGON_V64_internal_union;                                                   \
-    _HEXAGON_V64_internal_union.b[0] = (b0);                                         \
-    _HEXAGON_V64_internal_union.b[1] = (b1);                                         \
-    _HEXAGON_V64_internal_union.b[2] = (b2);                                         \
-    _HEXAGON_V64_internal_union.b[3] = (b3);                                         \
-    _HEXAGON_V64_internal_union.b[4] = (b4);                                         \
-    _HEXAGON_V64_internal_union.b[5] = (b5);                                         \
-    _HEXAGON_V64_internal_union.b[6] = (b6);                                         \
-    _HEXAGON_V64_internal_union.b[7] = (b7);                                         \
-    _HEXAGON_V64_internal_union.d;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  ((((HEXAGON_Vect64)(b7)) << 56LL) | (((HEXAGON_Vect64)((b6) & 0xff)) << 48LL) |          \
-   (((HEXAGON_Vect64)((b5) & 0xff)) << 40LL) | (((HEXAGON_Vect64)((b4) & 0xff)) << 32LL) | \
-   (((HEXAGON_Vect64)((b3) & 0xff)) << 24LL) | (((HEXAGON_Vect64)((b2) & 0xff)) << 16LL) | \
-   (((HEXAGON_Vect64)((b1) & 0xff)) << 8LL) | ((HEXAGON_Vect64)((b0) & 0xff)))
-
-#endif /* !__hexagon__ */
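Note the argument order in all three groups of create macros: lanes are listed from the most significant down to lane 0, matching the GET/PUT numbering. For example:

    HEXAGON_Vect64 h = HEXAGON_V64_CREATE_H(3, 2, 1, 0);             /* H3..H0 */
    HEXAGON_Vect64 b = HEXAGON_V64_CREATE_B(7, 6, 5, 4, 3, 2, 1, 0); /* B7..B0 */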
-
-#ifdef __cplusplus
-
-class HEXAGON_Vect64C {
-public:
-  // Constructors
-  HEXAGON_Vect64C(long long d = 0) : data(d) {};
-  HEXAGON_Vect64C(int w1, int w0) : data(HEXAGON_V64_CREATE_W(w1, w0)) {};
-  HEXAGON_Vect64C(short h3, short h2, short h1, short h0)
-      : data(HEXAGON_V64_CREATE_H(h3, h2, h1, h0)) {};
-  HEXAGON_Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
-            signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};
-  HEXAGON_Vect64C(const HEXAGON_Vect64C &v) : data(v.data) {};
-
-  HEXAGON_Vect64C &operator=(const HEXAGON_Vect64C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator long long() {
-    return data;
-  };
-
-  // Extract doubleword methods
-  long long D(void) {
-    return HEXAGON_V64_GET_D(data);
-  };
-  unsigned long long UD(void) {
-    return HEXAGON_V64_GET_UD(data);
-  };
-
-  // Extract word methods
-  int W0(void) {
-    return HEXAGON_V64_GET_W0(data);
-  };
-  int W1(void) {
-    return HEXAGON_V64_GET_W1(data);
-  };
-  unsigned int UW0(void) {
-    return HEXAGON_V64_GET_UW0(data);
-  };
-  unsigned int UW1(void) {
-    return HEXAGON_V64_GET_UW1(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return HEXAGON_V64_GET_H0(data);
-  };
-  short H1(void) {
-    return HEXAGON_V64_GET_H1(data);
-  };
-  short H2(void) {
-    return HEXAGON_V64_GET_H2(data);
-  };
-  short H3(void) {
-    return HEXAGON_V64_GET_H3(data);
-  };
-  unsigned short UH0(void) {
-    return HEXAGON_V64_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return HEXAGON_V64_GET_UH1(data);
-  };
-  unsigned short UH2(void) {
-    return HEXAGON_V64_GET_UH2(data);
-  };
-  unsigned short UH3(void) {
-    return HEXAGON_V64_GET_UH3(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return HEXAGON_V64_GET_B0(data);
-  };
-  signed char B1(void) {
-    return HEXAGON_V64_GET_B1(data);
-  };
-  signed char B2(void) {
-    return HEXAGON_V64_GET_B2(data);
-  };
-  signed char B3(void) {
-    return HEXAGON_V64_GET_B3(data);
-  };
-  signed char B4(void) {
-    return HEXAGON_V64_GET_B4(data);
-  };
-  signed char B5(void) {
-    return HEXAGON_V64_GET_B5(data);
-  };
-  signed char B6(void) {
-    return HEXAGON_V64_GET_B6(data);
-  };
-  signed char B7(void) {
-    return HEXAGON_V64_GET_B7(data);
-  };
-  unsigned char UB0(void) {
-    return HEXAGON_V64_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return HEXAGON_V64_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return HEXAGON_V64_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return HEXAGON_V64_GET_UB3(data);
-  };
-  unsigned char UB4(void) {
-    return HEXAGON_V64_GET_UB4(data);
-  };
-  unsigned char UB5(void) {
-    return HEXAGON_V64_GET_UB5(data);
-  };
-  unsigned char UB6(void) {
-    return HEXAGON_V64_GET_UB6(data);
-  };
-  unsigned char UB7(void) {
-    return HEXAGON_V64_GET_UB7(data);
-  };
-
-  // NOTE: All set methods return a HEXAGON_Vect64C type
-
-  // Set doubleword method
-  HEXAGON_Vect64C D(long long d) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_D(data, d));
-  };
-
-  // Set word methods
-  HEXAGON_Vect64C W0(int w) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_W0(data, w));
-  };
-  HEXAGON_Vect64C W1(int w) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_W1(data, w));
-  };
-
-  // Set half word methods
-  HEXAGON_Vect64C H0(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H0(data, h));
-  };
-  HEXAGON_Vect64C H1(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H1(data, h));
-  };
-  HEXAGON_Vect64C H2(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H2(data, h));
-  };
-  HEXAGON_Vect64C H3(short h) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_H3(data, h));
-  };
-
-  // Set byte methods
-  HEXAGON_Vect64C B0(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B0(data, b));
-  };
-  HEXAGON_Vect64C B1(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B1(data, b));
-  };
-  HEXAGON_Vect64C B2(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B2(data, b));
-  };
-  HEXAGON_Vect64C B3(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B3(data, b));
-  };
-  HEXAGON_Vect64C B4(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B4(data, b));
-  };
-  HEXAGON_Vect64C B5(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B5(data, b));
-  };
-  HEXAGON_Vect64C B6(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B6(data, b));
-  };
-  HEXAGON_Vect64C B7(signed char b) {
-    return HEXAGON_Vect64C(HEXAGON_V64_PUT_B7(data, b));
-  };
-
-private:
-  long long data;
-};
-
-#endif /* __cplusplus */
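
For orientation, here is a hypothetical usage sketch for the HEXAGON_Vect64C
wrapper above (the function name and literal values are ours, not part of the
header; note that the set methods are value-returning and leave the receiver
unchanged):

    // Hypothetical sketch: build a 64-bit vector from bytes b7..b0, read the
    // low word, and produce a copy with halfword 3 replaced.
    static void demo_hexagon_v64c(void) {
      HEXAGON_Vect64C v(7, 6, 5, 4, 3, 2, 1, 0);
      int lo = v.W0();                   // bytes 3..0 -> 0x03020100
      HEXAGON_Vect64C w = v.H3(0x7fff);  // v itself is not modified
      long long raw = w;                 // implicit operator long long()
      (void)lo; (void)raw;
    }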
-
-/* 32 Bit Vectors */
-
-typedef int HEXAGON_Vect32;
-
-/* Extract word macros */
-
-#define HEXAGON_V32_GET_W(v) (v)
-#define HEXAGON_V32_GET_UW(v) ((unsigned int)(v))
-
-/* Extract half word macros */
-
-#define HEXAGON_V32_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[0];                                                \
-  })
-#define HEXAGON_V32_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[1];                                                \
-  })
-#define HEXAGON_V32_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.uh[0];                                               \
-  })
-#define HEXAGON_V32_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.uh[1];                                               \
-  })
-
-/* Extract byte macros */
-
-#define HEXAGON_V32_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[0];                                                \
-  })
-#define HEXAGON_V32_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[1];                                                \
-  })
-#define HEXAGON_V32_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[2];                                                \
-  })
-#define HEXAGON_V32_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[3];                                                \
-  })
-#define HEXAGON_V32_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[0];                                               \
-  })
-#define HEXAGON_V32_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[1];                                               \
-  })
-#define HEXAGON_V32_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[2];                                               \
-  })
-#define HEXAGON_V32_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.ub[3];                                               \
-  })
-
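As a sanity check on the extract macros above (which rely on the GNU
statement-expression extension, so GCC or Clang is assumed), a small sketch
with names and constants of our choosing; lane 0 is the least-significant
lane, as the unions imply on a little-endian target:

    /* Hypothetical sketch: pull individual lanes out of a HEXAGON_Vect32. */
    static void demo_v32_extract(void) {
      HEXAGON_Vect32 v = (HEXAGON_Vect32)0x80FF1234;
      short h1 = HEXAGON_V32_GET_H1(v);            /* 0x80FF, sign-extended */
      unsigned short uh0 = HEXAGON_V32_GET_UH0(v); /* 0x1234 */
      signed char b3 = HEXAGON_V32_GET_B3(v);      /* 0x80 -> -128 */
      (void)h1; (void)uh0; (void)b3;
    }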
-/* NOTE: All set macros return a HEXAGON_Vect32 type */
-
-/* Set word macro */
-
-#define HEXAGON_V32_PUT_W(v, new) (new)
-
-/* Set half word macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[0] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.h[1] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_PUT_H0(v, new)                                                   \
-  (((v) & 0xffff0000) | ((HEXAGON_Vect32)((unsigned short)(new))))
-#define HEXAGON_V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((HEXAGON_Vect32)(new)) << 16))
-
-#endif /* !__hexagon__ */
-
-/* Set byte macros */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[0] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[1] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[2] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-#define HEXAGON_V32_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.w = (v);                                             \
-    _HEXAGON_V32_internal_union.b[3] = (new);                                        \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffff00) | ((HEXAGON_Vect32)((unsigned char)(new))))
-#define HEXAGON_V32_PUT_B1(v, new)                                                   \
-  (((v) & 0xffff00ff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 8))
-#define HEXAGON_V32_PUT_B2(v, new)                                                   \
-  (((v) & 0xff00ffff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 16))
-#define HEXAGON_V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((HEXAGON_Vect32)(new)) << 24))
-
-#endif /* !__hexagon__ */
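
The set macros above are value-returning: they never modify their argument,
so the result must be assigned back. A minimal sketch under that reading
(names ours):

    /* Hypothetical sketch: replace halfword 0, then byte 3, of a word. */
    static void demo_v32_put(void) {
      HEXAGON_Vect32 v = 0;
      v = HEXAGON_V32_PUT_H0(v, 0x1234); /* v == 0x00001234 */
      v = HEXAGON_V32_PUT_B3(v, 0x7f);   /* v == 0x7F001234 */
      (void)v;
    }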
-
-/* NOTE: All create macros return a HEXAGON_Vect32 type */
-
-/* Create from a word */
-
-#define HEXAGON_V32_CREATE_W(w) (w)
-
-/* Create from half words */
-
-#ifdef __hexagon__
-
-#define HEXAGON_V32_CREATE_H(h1, h0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w; /* int, not long long: the result must be a HEXAGON_Vect32 */     \
-      short h[2];                                                              \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.h[0] = (h0);                                         \
-    _HEXAGON_V32_internal_union.h[1] = (h1);                                         \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_CREATE_H(h1, h0)                                                 \
-  ((((HEXAGON_Vect32)(h1)) << 16) | ((HEXAGON_Vect32)((h0) & 0xffff)))
-
-#endif /* !__hexagon__ */
-
-/* Create from bytes */
-#ifdef __hexagon__
-
-#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w; /* int, not long long: the result must be a HEXAGON_Vect32 */     \
-      char b[4];                                                               \
-    } _HEXAGON_V32_internal_union;                                                   \
-    _HEXAGON_V32_internal_union.b[0] = (b0);                                         \
-    _HEXAGON_V32_internal_union.b[1] = (b1);                                         \
-    _HEXAGON_V32_internal_union.b[2] = (b2);                                         \
-    _HEXAGON_V32_internal_union.b[3] = (b3);                                         \
-    _HEXAGON_V32_internal_union.w;                                                   \
-  })
-
-#else /* !__hexagon__ */
-
-#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0)                                         \
-  ((((HEXAGON_Vect32)(b3)) << 24) | (((HEXAGON_Vect32)((b2) & 0xff)) << 16) |              \
-   (((HEXAGON_Vect32)((b1) & 0xff)) << 8) | ((HEXAGON_Vect32)((b0) & 0xff)))
-
-#endif /* !__hexagon__ */
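
A short sketch of the create macros (names ours); arguments are given
most-significant lane first, matching the parameter order in the definitions:

    /* Hypothetical sketch: assemble words from halfwords and from bytes. */
    static void demo_v32_create(void) {
      HEXAGON_Vect32 a = HEXAGON_V32_CREATE_H(0x0001, 0x0002); /* 0x00010002 */
      HEXAGON_Vect32 b = HEXAGON_V32_CREATE_B(4, 3, 2, 1);     /* 0x04030201 */
      (void)a; (void)b;
    }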
-
-#ifdef __cplusplus
-
-class HEXAGON_Vect32C {
-public:
-  // Constructors
-  HEXAGON_Vect32C(int w = 0) : data(w) {};
-  HEXAGON_Vect32C(short h1, short h0) : data(HEXAGON_V32_CREATE_H(h1, h0)) {};
-  HEXAGON_Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(HEXAGON_V32_CREATE_B(b3, b2, b1, b0)) {};
-  HEXAGON_Vect32C(const HEXAGON_Vect32C &v) : data(v.data) {};
-
-  HEXAGON_Vect32C &operator=(const HEXAGON_Vect32C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator int() {
-    return data;
-  };
-
-  // Extract word methods
-  int W(void) {
-    return HEXAGON_V32_GET_W(data);
-  };
-  unsigned int UW(void) {
-    return HEXAGON_V32_GET_UW(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return HEXAGON_V32_GET_H0(data);
-  };
-  short H1(void) {
-    return HEXAGON_V32_GET_H1(data);
-  };
-  unsigned short UH0(void) {
-    return HEXAGON_V32_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return HEXAGON_V32_GET_UH1(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return HEXAGON_V32_GET_B0(data);
-  };
-  signed char B1(void) {
-    return HEXAGON_V32_GET_B1(data);
-  };
-  signed char B2(void) {
-    return HEXAGON_V32_GET_B2(data);
-  };
-  signed char B3(void) {
-    return HEXAGON_V32_GET_B3(data);
-  };
-  unsigned char UB0(void) {
-    return HEXAGON_V32_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return HEXAGON_V32_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return HEXAGON_V32_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return HEXAGON_V32_GET_UB3(data);
-  };
-
-  // NOTE: All set methods return a HEXAGON_Vect32C type
-
-  // Set word method
-  HEXAGON_Vect32C W(int w) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_W(data, w));
-  };
-
-  // Set half word methods
-  HEXAGON_Vect32C H0(short h) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_H0(data, h));
-  };
-  HEXAGON_Vect32C H1(short h) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_H1(data, h));
-  };
-
-  // Set byte methods
-  HEXAGON_Vect32C B0(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B0(data, b));
-  };
-  HEXAGON_Vect32C B1(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B1(data, b));
-  };
-  HEXAGON_Vect32C B2(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B2(data, b));
-  };
-  HEXAGON_Vect32C B3(signed char b) {
-    return HEXAGON_Vect32C(HEXAGON_V32_PUT_B3(data, b));
-  };
-
-private:
-  int data;
-};
-
-#endif /* __cplusplus */
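
As with the 64-bit class, a hypothetical C++ sketch for HEXAGON_Vect32C
(function name and values ours):

    // Hypothetical sketch: 32-bit wrapper round trip.
    static void demo_hexagon_v32c(void) {
      HEXAGON_Vect32C v(1, 2);        // (h1, h0) constructor
      unsigned short uh1 = v.UH1();   // 0x0001
      HEXAGON_Vect32C w = v.B0(0x7f); // copy with byte 0 replaced
      int raw = w;                    // implicit operator int()
      (void)uh1; (void)raw;
    }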
-
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
-// V65 Vector types
-#if __HVX_ARCH__ >= 65
-#if defined __HVX__ && (__HVX_LENGTH__ == 128)
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define HVX_VectorPred     HEXAGON_VecPred128
-  #define HVX_Vector         HEXAGON_Vect1024
-  #define HVX_VectorPair     HEXAGON_Vect2048
-  #define HVX_UVector        HEXAGON_UVect1024
-  #define HVX_UVectorPair    HEXAGON_UVect2048
-#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#if defined __HVX__ && (__HVX_LENGTH__ == 64)
-  typedef long HEXAGON_VecPred64 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-  typedef long HEXAGON_Vect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_UVect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  #define HVX_VectorPred     HEXAGON_VecPred64
-  #define HVX_Vector         HEXAGON_Vect512
-  #define HVX_VectorPair     HEXAGON_Vect1024
-  #define HVX_UVector        HEXAGON_UVect512
-  #define HVX_UVectorPair    HEXAGON_UVect1024
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
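
To make the conditional typedefs above concrete, a guarded sketch of ours
that compiles only when the toolchain targets HVX v65+ with 128-byte vectors
(and not the Q6S branch, which takes precedence above):

    /* Hypothetical sketch: objects sized and aligned via the HVX aliases. */
    #if !(__Q6S_ARCH__ >= 65) && __HVX_ARCH__ >= 65 && defined(__HVX__) && \
        (__HVX_LENGTH__ == 128)
    static HVX_Vector     g_v;  /* 128 bytes, 128-byte aligned */
    static HVX_VectorPair g_vp; /* 256 bytes, 256-byte aligned */
    static HVX_UVector    g_uv; /* 128 bytes, 4-byte aligned (unaligned access) */
    #endif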
-
-/* Predicates */
-
-typedef int HEXAGON_Pred;
-
-/***
- *** backward compatibility aliases
- ***/
-
-/* Old names */
-#define Q6Vect Q6Vect64
-#define Q6V_GET_D Q6V64_GET_D
-#define Q6V_GET_UD Q6V64_GET_UD
-#define Q6V_GET_W0 Q6V64_GET_W0
-#define Q6V_GET_W1 Q6V64_GET_W1
-#define Q6V_GET_UW0 Q6V64_GET_UW0
-#define Q6V_GET_UW1 Q6V64_GET_UW1
-#define Q6V_GET_H0 Q6V64_GET_H0
-#define Q6V_GET_H1 Q6V64_GET_H1
-#define Q6V_GET_H2 Q6V64_GET_H2
-#define Q6V_GET_H3 Q6V64_GET_H3
-#define Q6V_GET_UH0 Q6V64_GET_UH0
-#define Q6V_GET_UH1 Q6V64_GET_UH1
-#define Q6V_GET_UH2 Q6V64_GET_UH2
-#define Q6V_GET_UH3 Q6V64_GET_UH3
-#define Q6V_GET_B0 Q6V64_GET_B0
-#define Q6V_GET_B1 Q6V64_GET_B1
-#define Q6V_GET_B2 Q6V64_GET_B2
-#define Q6V_GET_B3 Q6V64_GET_B3
-#define Q6V_GET_B4 Q6V64_GET_B4
-#define Q6V_GET_B5 Q6V64_GET_B5
-#define Q6V_GET_B6 Q6V64_GET_B6
-#define Q6V_GET_B7 Q6V64_GET_B7
-#define Q6V_GET_UB0 Q6V64_GET_UB0
-#define Q6V_GET_UB1 Q6V64_GET_UB1
-#define Q6V_GET_UB2 Q6V64_GET_UB2
-#define Q6V_GET_UB3 Q6V64_GET_UB3
-#define Q6V_GET_UB4 Q6V64_GET_UB4
-#define Q6V_GET_UB5 Q6V64_GET_UB5
-#define Q6V_GET_UB6 Q6V64_GET_UB6
-#define Q6V_GET_UB7 Q6V64_GET_UB7
-#define Q6V_PUT_D Q6V64_PUT_D
-#define Q6V_PUT_W0 Q6V64_PUT_W0
-#define Q6V_PUT_W1 Q6V64_PUT_W1
-#define Q6V_PUT_H0 Q6V64_PUT_H0
-#define Q6V_PUT_H1 Q6V64_PUT_H1
-#define Q6V_PUT_H2 Q6V64_PUT_H2
-#define Q6V_PUT_H3 Q6V64_PUT_H3
-#define Q6V_PUT_B0 Q6V64_PUT_B0
-#define Q6V_PUT_B1 Q6V64_PUT_B1
-#define Q6V_PUT_B2 Q6V64_PUT_B2
-#define Q6V_PUT_B3 Q6V64_PUT_B3
-#define Q6V_PUT_B4 Q6V64_PUT_B4
-#define Q6V_PUT_B5 Q6V64_PUT_B5
-#define Q6V_PUT_B6 Q6V64_PUT_B6
-#define Q6V_PUT_B7 Q6V64_PUT_B7
-#define Q6V_CREATE_D Q6V64_CREATE_D
-#define Q6V_CREATE_W Q6V64_CREATE_W
-#define Q6V_CREATE_H Q6V64_CREATE_H
-#define Q6V_CREATE_B Q6V64_CREATE_B
-
-#ifdef __cplusplus
-#define Q6VectC Q6Vect64C
-#endif /* __cplusplus */
-
-/* 64 Bit Vectors */
-
-typedef long long __attribute__((__may_alias__)) Q6Vect64;
-
-/* Extract doubleword macros */
-
-#define Q6V64_GET_D(v) (v)
-#define Q6V64_GET_UD(v) ((unsigned long long)(v))
-
-/* Extract word macros */
-
-#define Q6V64_GET_W0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[0];                                                \
-  })
-#define Q6V64_GET_W1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[1];                                                \
-  })
-#define Q6V64_GET_UW0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uw[0];                                               \
-  })
-#define Q6V64_GET_UW1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned int uw[2];                                                      \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uw[1];                                               \
-  })
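
The legacy Q6V_GET_W0 spelling from the compatibility block above maps onto
Q6V64_GET_W0 as just defined; a one-line sketch (ours):

    /* Hypothetical sketch: extract the low word through the old alias. */
    static int demo_q6_alias(Q6Vect64 v) {
      return Q6V_GET_W0(v); /* expands to Q6V64_GET_W0(v) */
    }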
-
-/* Extract half word macros */
-
-#define Q6V64_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[0];                                                \
-  })
-#define Q6V64_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[1];                                                \
-  })
-#define Q6V64_GET_H2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[2];                                                \
-  })
-#define Q6V64_GET_H3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[3];                                                \
-  })
-#define Q6V64_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[0];                                               \
-  })
-#define Q6V64_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[1];                                               \
-  })
-#define Q6V64_GET_UH2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[2];                                               \
-  })
-#define Q6V64_GET_UH3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned short uh[4];                                                    \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.uh[3];                                               \
-  })
-
-/* Extract byte macros */
-
-#define Q6V64_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[0];                                                \
-  })
-#define Q6V64_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[1];                                                \
-  })
-#define Q6V64_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[2];                                                \
-  })
-#define Q6V64_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[3];                                                \
-  })
-#define Q6V64_GET_B4(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[4];                                                \
-  })
-#define Q6V64_GET_B5(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[5];                                                \
-  })
-#define Q6V64_GET_B6(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[6];                                                \
-  })
-#define Q6V64_GET_B7(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      signed char b[8];                                                        \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[7];                                                \
-  })
-#define Q6V64_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[0];                                               \
-  })
-#define Q6V64_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[1];                                               \
-  })
-#define Q6V64_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[2];                                               \
-  })
-#define Q6V64_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[3];                                               \
-  })
-#define Q6V64_GET_UB4(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[4];                                               \
-  })
-#define Q6V64_GET_UB5(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[5];                                               \
-  })
-#define Q6V64_GET_UB6(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[6];                                               \
-  })
-#define Q6V64_GET_UB7(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      unsigned char ub[8];                                                     \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.ub[7];                                               \
-  })
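
A brief sketch (ours) contrasting the signed and unsigned byte extracts above:

    /* Hypothetical sketch: signed vs. unsigned views of byte lane 7. */
    static void demo_q6v64_get_b(void) {
      Q6Vect64 v = (Q6Vect64)0x8000000000000000ULL; /* only byte 7 set */
      signed char   sb = Q6V64_GET_B7(v);           /* -128 */
      unsigned char ub = Q6V64_GET_UB7(v);          /* 0x80 == 128 */
      (void)sb; (void)ub;
    }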
-
-/* NOTE: All set macros return a Q6Vect64 type */
-
-/* Set doubleword macro */
-
-#define Q6V64_PUT_D(v, new) (new)
-
-/* Set word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_W0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_W1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.w[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_W0(v, new)                                                   \
-  (((v) & 0xffffffff00000000LL) | ((Q6Vect64)((unsigned int)(new))))
-#define Q6V64_PUT_W1(v, new)                                                   \
-  (((v) & 0x00000000ffffffffLL) | (((Q6Vect64)(new)) << 32LL))
-
-#endif /* !__qdsp6__ */
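
Both branches above produce the same value; a quick sketch (ours) of the
host-fallback arithmetic:

    /* Hypothetical sketch: replace the high word of a doubleword. */
    static void demo_q6v64_put_w(void) {
      Q6Vect64 v = 0x1234;
      v = Q6V64_PUT_W1(v, 0x5678); /* v == 0x0000567800001234 */
      (void)v;
    }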
-
-/* Set half word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[2] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_H3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.h[3] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_H0(v, new)                                                   \
-  (((v) & 0xffffffffffff0000LL) | ((Q6Vect64)((unsigned short)(new))))
-#define Q6V64_PUT_H1(v, new)                                                   \
-  (((v) & 0xffffffff0000ffffLL) | (((Q6Vect64)((unsigned short)(new))) << 16LL))
-#define Q6V64_PUT_H2(v, new)                                                   \
-  (((v) & 0xffff0000ffffffffLL) | (((Q6Vect64)((unsigned short)(new))) << 32LL))
-#define Q6V64_PUT_H3(v, new)                                                   \
-  (((v) & 0x0000ffffffffffffLL) | (((Q6Vect64)(new)) << 48LL))
-
-#endif /* !__qdsp6__ */
-
-/* Set byte macros */
-
-#ifdef __qdsp6__
-
-#define Q6V64_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[0] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[1] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[2] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[3] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B4(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[4] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B5(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[5] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B6(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[6] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-#define Q6V64_PUT_B7(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.d = (v);                                             \
-    _Q6V64_internal_union.b[7] = (new);                                        \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffffffffffff00LL) | ((Q6Vect64)((unsigned char)(new))))
-#define Q6V64_PUT_B1(v, new)                                                   \
-  (((v) & 0xffffffffffff00ffLL) | (((Q6Vect64)((unsigned char)(new))) << 8LL))
-#define Q6V64_PUT_B2(v, new)                                                   \
-  (((v) & 0xffffffffff00ffffLL) | (((Q6Vect64)((unsigned char)(new))) << 16LL))
-#define Q6V64_PUT_B3(v, new)                                                   \
-  (((v) & 0xffffffff00ffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 24LL))
-#define Q6V64_PUT_B4(v, new)                                                   \
-  (((v) & 0xffffff00ffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 32LL))
-#define Q6V64_PUT_B5(v, new)                                                   \
-  (((v) & 0xffff00ffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 40LL))
-#define Q6V64_PUT_B6(v, new)                                                   \
-  (((v) & 0xff00ffffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 48LL))
-#define Q6V64_PUT_B7(v, new)                                                   \
-  (((v) & 0x00ffffffffffffffLL) | (((Q6Vect64)(new)) << 56LL))
-
-#endif /* !__qdsp6__ */
-
-/* NOTE: All create macros return a Q6Vect64 type */
-
-/* Create from a doubleword */
-
-#define Q6V64_CREATE_D(d) (d)
-
-/* Create from words */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_W(w1, w0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      int w[2];                                                                \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.w[0] = (w0);                                         \
-    _Q6V64_internal_union.w[1] = (w1);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_W(w1, w0)                                                 \
-  ((((Q6Vect64)(w1)) << 32LL) | ((Q6Vect64)((w0) & 0xffffffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from half words */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_H(h3, h2, h1, h0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      short h[4];                                                              \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.h[0] = (h0);                                         \
-    _Q6V64_internal_union.h[1] = (h1);                                         \
-    _Q6V64_internal_union.h[2] = (h2);                                         \
-    _Q6V64_internal_union.h[3] = (h3);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_H(h3, h2, h1, h0)                                         \
-  ((((Q6Vect64)(h3)) << 48LL) | (((Q6Vect64)((h2) & 0xffff)) << 32LL) |        \
-   (((Q6Vect64)((h1) & 0xffff)) << 16LL) | ((Q6Vect64)((h0) & 0xffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from bytes */
-
-#ifdef __qdsp6__
-
-#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      long long d;                                                             \
-      char b[8];                                                               \
-    } _Q6V64_internal_union;                                                   \
-    _Q6V64_internal_union.b[0] = (b0);                                         \
-    _Q6V64_internal_union.b[1] = (b1);                                         \
-    _Q6V64_internal_union.b[2] = (b2);                                         \
-    _Q6V64_internal_union.b[3] = (b3);                                         \
-    _Q6V64_internal_union.b[4] = (b4);                                         \
-    _Q6V64_internal_union.b[5] = (b5);                                         \
-    _Q6V64_internal_union.b[6] = (b6);                                         \
-    _Q6V64_internal_union.b[7] = (b7);                                         \
-    _Q6V64_internal_union.d;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)                         \
-  ((((Q6Vect64)(b7)) << 56LL) | (((Q6Vect64)((b6) & 0xff)) << 48LL) |          \
-   (((Q6Vect64)((b5) & 0xff)) << 40LL) | (((Q6Vect64)((b4) & 0xff)) << 32LL) | \
-   (((Q6Vect64)((b3) & 0xff)) << 24LL) | (((Q6Vect64)((b2) & 0xff)) << 16LL) | \
-   (((Q6Vect64)((b1) & 0xff)) << 8LL) | ((Q6Vect64)((b0) & 0xff)))
-
-#endif /* !__qdsp6__ */
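-
-/* Illustrative sketch (not part of the original header): packing a 64-bit
-   vector from bytes, then reading and replacing individual lanes.  Byte 0
-   is the least-significant byte.
-
-       Q6Vect64 v = Q6V64_CREATE_B(7, 6, 5, 4, 3, 2, 1, 0);
-       signed char b2 = Q6V64_GET_B2(v);   // lane 2 -> 2
-       v = Q6V64_PUT_B7(v, 0x7f);          // copy with the top byte replaced
-*/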
-
-#ifdef __cplusplus
-
-class Q6Vect64C {
-public:
-  // Constructors
-  Q6Vect64C(long long d = 0) : data(d) {};
-  Q6Vect64C(int w1, int w0) : data(Q6V64_CREATE_W(w1, w0)) {};
-  Q6Vect64C(short h3, short h2, short h1, short h0)
-      : data(Q6V64_CREATE_H(h3, h2, h1, h0)) {};
-  Q6Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
-            signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};
-  Q6Vect64C(const Q6Vect64C &v) : data(v.data) {};
-
-  Q6Vect64C &operator=(const Q6Vect64C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator long long() {
-    return data;
-  };
-
-  // Extract doubleword methods
-  long long D(void) {
-    return Q6V64_GET_D(data);
-  };
-  unsigned long long UD(void) {
-    return Q6V64_GET_UD(data);
-  };
-
-  // Extract word methods
-  int W0(void) {
-    return Q6V64_GET_W0(data);
-  };
-  int W1(void) {
-    return Q6V64_GET_W1(data);
-  };
-  unsigned int UW0(void) {
-    return Q6V64_GET_UW0(data);
-  };
-  unsigned int UW1(void) {
-    return Q6V64_GET_UW1(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return Q6V64_GET_H0(data);
-  };
-  short H1(void) {
-    return Q6V64_GET_H1(data);
-  };
-  short H2(void) {
-    return Q6V64_GET_H2(data);
-  };
-  short H3(void) {
-    return Q6V64_GET_H3(data);
-  };
-  unsigned short UH0(void) {
-    return Q6V64_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return Q6V64_GET_UH1(data);
-  };
-  unsigned short UH2(void) {
-    return Q6V64_GET_UH2(data);
-  };
-  unsigned short UH3(void) {
-    return Q6V64_GET_UH3(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return Q6V64_GET_B0(data);
-  };
-  signed char B1(void) {
-    return Q6V64_GET_B1(data);
-  };
-  signed char B2(void) {
-    return Q6V64_GET_B2(data);
-  };
-  signed char B3(void) {
-    return Q6V64_GET_B3(data);
-  };
-  signed char B4(void) {
-    return Q6V64_GET_B4(data);
-  };
-  signed char B5(void) {
-    return Q6V64_GET_B5(data);
-  };
-  signed char B6(void) {
-    return Q6V64_GET_B6(data);
-  };
-  signed char B7(void) {
-    return Q6V64_GET_B7(data);
-  };
-  unsigned char UB0(void) {
-    return Q6V64_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return Q6V64_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return Q6V64_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return Q6V64_GET_UB3(data);
-  };
-  unsigned char UB4(void) {
-    return Q6V64_GET_UB4(data);
-  };
-  unsigned char UB5(void) {
-    return Q6V64_GET_UB5(data);
-  };
-  unsigned char UB6(void) {
-    return Q6V64_GET_UB6(data);
-  };
-  unsigned char UB7(void) {
-    return Q6V64_GET_UB7(data);
-  };
-
-  // NOTE: All set methods return a Q6Vect64C type
-
-  // Set doubleword method
-  Q6Vect64C D(long long d) {
-    return Q6Vect64C(Q6V64_PUT_D(data, d));
-  };
-
-  // Set word methods
-  Q6Vect64C W0(int w) {
-    return Q6Vect64C(Q6V64_PUT_W0(data, w));
-  };
-  Q6Vect64C W1(int w) {
-    return Q6Vect64C(Q6V64_PUT_W1(data, w));
-  };
-
-  // Set half word methods
-  Q6Vect64C H0(short h) {
-    return Q6Vect64C(Q6V64_PUT_H0(data, h));
-  };
-  Q6Vect64C H1(short h) {
-    return Q6Vect64C(Q6V64_PUT_H1(data, h));
-  };
-  Q6Vect64C H2(short h) {
-    return Q6Vect64C(Q6V64_PUT_H2(data, h));
-  };
-  Q6Vect64C H3(short h) {
-    return Q6Vect64C(Q6V64_PUT_H3(data, h));
-  };
-
-  // Set byte methods
-  Q6Vect64C B0(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B0(data, b));
-  };
-  Q6Vect64C B1(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B1(data, b));
-  };
-  Q6Vect64C B2(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B2(data, b));
-  };
-  Q6Vect64C B3(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B3(data, b));
-  };
-  Q6Vect64C B4(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B4(data, b));
-  };
-  Q6Vect64C B5(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B5(data, b));
-  };
-  Q6Vect64C B6(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B6(data, b));
-  };
-  Q6Vect64C B7(signed char b) {
-    return Q6Vect64C(Q6V64_PUT_B7(data, b));
-  };
-
-private:
-  long long data;
-};
-
-#endif /* __cplusplus */
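-
-/* Illustrative sketch (not part of the original header): the C++ wrapper
-   mirrors the macros above, and every setter returns a new Q6Vect64C
-   instead of mutating in place.
-
-       Q6Vect64C v(0x01234567, 0x089abcde);   // w1 = high word, w0 = low word
-       int lo = v.W0();                       // low word
-       Q6Vect64C u = v.B7(0x7f);              // copy with byte 7 replaced
-       long long raw = u;                     // implicit conversion
-*/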
-
-/* 32 Bit Vectors */
-
-typedef int Q6Vect32;
-
-/* Extract word macros */
-
-#define Q6V32_GET_W(v) (v)
-#define Q6V32_GET_UW(v) ((unsigned int)(v))
-
-/* Extract half word macros */
-
-#define Q6V32_GET_H0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[0];                                                \
-  })
-#define Q6V32_GET_H1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[1];                                                \
-  })
-#define Q6V32_GET_UH0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.uh[0];                                               \
-  })
-#define Q6V32_GET_UH1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned short uh[2];                                                    \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.uh[1];                                               \
-  })
-
-/* Extract byte macros */
-
-#define Q6V32_GET_B0(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[0];                                                \
-  })
-#define Q6V32_GET_B1(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[1];                                                \
-  })
-#define Q6V32_GET_B2(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[2];                                                \
-  })
-#define Q6V32_GET_B3(v)                                                        \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      signed char b[4];                                                        \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[3];                                                \
-  })
-#define Q6V32_GET_UB0(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[0];                                               \
-  })
-#define Q6V32_GET_UB1(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[1];                                               \
-  })
-#define Q6V32_GET_UB2(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[2];                                               \
-  })
-#define Q6V32_GET_UB3(v)                                                       \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      unsigned char ub[4];                                                     \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.ub[3];                                               \
-  })
-
-/* NOTE: All set macros return a Q6Vect32 type */
-
-/* Set word macro */
-
-#define Q6V32_PUT_W(v, new) (new)
-
-/* Set half word macros */
-
-#ifdef __qdsp6__
-
-#define Q6V32_PUT_H0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[0] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_H1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.h[1] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_PUT_H0(v, new)                                                   \
-  (((v) & 0xffff0000) | ((Q6Vect32)((unsigned short)(new))))
-#define Q6V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((Q6Vect32)(new)) << 16))
-
-#endif /* !__qdsp6__ */
-
-/* Set byte macros */
-
-#ifdef __qdsp6__
-
-#define Q6V32_PUT_B0(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[0] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B1(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[1] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B2(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[2] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-#define Q6V32_PUT_B3(v, new)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.w = (v);                                             \
-    _Q6V32_internal_union.b[3] = (new);                                        \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_PUT_B0(v, new)                                                   \
-  (((v) & 0xffffff00) | ((Q6Vect32)((unsigned char)(new))))
-#define Q6V32_PUT_B1(v, new)                                                   \
-  (((v) & 0xffff00ff) | (((Q6Vect32)((unsigned char)(new))) << 8))
-#define Q6V32_PUT_B2(v, new)                                                   \
-  (((v) & 0xff00ffff) | (((Q6Vect32)((unsigned char)(new))) << 16))
-#define Q6V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((Q6Vect32)(new)) << 24))
-
-#endif /* !__qdsp6__ */
-
-/* NOTE: All create macros return a Q6Vect32 type */
-
-/* Create from a word */
-
-#define Q6V32_CREATE_W(w) (w)
-
-/* Create from half words */
-
-#ifdef __qdsp6__
-
-#define Q6V32_CREATE_H(h1, h0)                                                 \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      short h[2];                                                              \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.h[0] = (h0);                                         \
-    _Q6V32_internal_union.h[1] = (h1);                                         \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_CREATE_H(h1, h0)                                                 \
-  ((((Q6Vect32)(h1)) << 16) | ((Q6Vect32)((h0) & 0xffff)))
-
-#endif /* !__qdsp6__ */
-
-/* Create from bytes */
-#ifdef __qdsp6__
-
-#define Q6V32_CREATE_B(b3, b2, b1, b0)                                         \
-  __extension__({                                                              \
-    union {                                                                    \
-      int w;                                                                   \
-      char b[4];                                                               \
-    } _Q6V32_internal_union;                                                   \
-    _Q6V32_internal_union.b[0] = (b0);                                         \
-    _Q6V32_internal_union.b[1] = (b1);                                         \
-    _Q6V32_internal_union.b[2] = (b2);                                         \
-    _Q6V32_internal_union.b[3] = (b3);                                         \
-    _Q6V32_internal_union.w;                                                   \
-  })
-
-#else /* !__qdsp6__ */
-
-#define Q6V32_CREATE_B(b3, b2, b1, b0)                                         \
-  ((((Q6Vect32)(b3)) << 24) | (((Q6Vect32)((b2) & 0xff)) << 16) |              \
-   (((Q6Vect32)((b1) & 0xff)) << 8) | ((Q6Vect32)((b0) & 0xff)))
-
-#endif /* !__qdsp6__ */
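-
-/* Illustrative sketch (not part of the original header): building a 32-bit
-   vector from half words and editing a byte lane.
-
-       Q6Vect32 v = Q6V32_CREATE_H(0x0123, 0x4567);   // v == 0x01234567
-       short h0 = Q6V32_GET_H0(v);                    // low half -> 0x4567
-       v = Q6V32_PUT_B3(v, 0x7f);                     // v == 0x7f234567
-*/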
-
-#ifdef __cplusplus
-
-class Q6Vect32C {
-public:
-  // Constructors
-  Q6Vect32C(int w = 0) : data(w) {};
-  Q6Vect32C(short h1, short h0) : data(Q6V32_CREATE_H(h1, h0)) {};
-  Q6Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
-      : data(Q6V32_CREATE_B(b3, b2, b1, b0)) {};
-  Q6Vect32C(const Q6Vect32C &v) : data(v.data) {};
-
-  Q6Vect32C &operator=(const Q6Vect32C &v) {
-    data = v.data;
-    return *this;
-  };
-
-  operator int() {
-    return data;
-  };
-
-  // Extract word methods
-  int W(void) {
-    return Q6V32_GET_W(data);
-  };
-  unsigned int UW(void) {
-    return Q6V32_GET_UW(data);
-  };
-
-  // Extract half word methods
-  short H0(void) {
-    return Q6V32_GET_H0(data);
-  };
-  short H1(void) {
-    return Q6V32_GET_H1(data);
-  };
-  unsigned short UH0(void) {
-    return Q6V32_GET_UH0(data);
-  };
-  unsigned short UH1(void) {
-    return Q6V32_GET_UH1(data);
-  };
-
-  // Extract byte methods
-  signed char B0(void) {
-    return Q6V32_GET_B0(data);
-  };
-  signed char B1(void) {
-    return Q6V32_GET_B1(data);
-  };
-  signed char B2(void) {
-    return Q6V32_GET_B2(data);
-  };
-  signed char B3(void) {
-    return Q6V32_GET_B3(data);
-  };
-  unsigned char UB0(void) {
-    return Q6V32_GET_UB0(data);
-  };
-  unsigned char UB1(void) {
-    return Q6V32_GET_UB1(data);
-  };
-  unsigned char UB2(void) {
-    return Q6V32_GET_UB2(data);
-  };
-  unsigned char UB3(void) {
-    return Q6V32_GET_UB3(data);
-  };
-
-  // NOTE: All set methods return a Q6Vect32C type
-
-  // Set word method
-  Q6Vect32C W(int w) {
-    return Q6Vect32C(Q6V32_PUT_W(data, w));
-  };
-
-  // Set half word methods
-  Q6Vect32C H0(short h) {
-    return Q6Vect32C(Q6V32_PUT_H0(data, h));
-  };
-  Q6Vect32C H1(short h) {
-    return Q6Vect32C(Q6V32_PUT_H1(data, h));
-  };
-
-  // Set byte methods
-  Q6Vect32C B0(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B0(data, b));
-  };
-  Q6Vect32C B1(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B1(data, b));
-  };
-  Q6Vect32C B2(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B2(data, b));
-  };
-  Q6Vect32C B3(signed char b) {
-    return Q6Vect32C(Q6V32_PUT_B3(data, b));
-  };
-
-private:
-  int data;
-};
-
-#endif /* __cplusplus */
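-
-/* Illustrative sketch (not part of the original header): Q6Vect32C usage,
-   analogous to Q6Vect64C above.
-
-       Q6Vect32C v((short)0x0123, (short)0x4567);   // h1 = high half, h0 = low half
-       unsigned short hi = v.UH1();                 // high half -> 0x0123
-       Q6Vect32C w = v.H0(-1);                      // copy with the low half replaced
-*/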
-
-// V65 Vector types
-#if __HVX_ARCH__ >= 65
-#if defined __HVX__ && (__HVX_LENGTH__ == 128)
-typedef long Q6VecPred128 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-typedef long Q6Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#if defined __HVX__ && (__HVX_LENGTH__ == 64)
-typedef long Q6VecPred64 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-typedef long Q6Vect512 __attribute__((__vector_size__(64)))
-    __attribute__((aligned(64)));
-
-typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
-#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
-#endif /* __HVX_ARCH__ >= 65 */
-
-/* Predicates */
-
-typedef int Q6Pred;
-
-
-#ifdef __HVX__
-
-// Extract HVX VectorPair macro.
-#define HEXAGON_HVX_GET_W(v) (v)
-
-// Extract HVX Vector macros.
-#define HEXAGON_HVX_GET_V0(v)                                                  \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[0];                                          \
-  })
-#define HEXAGON_HVX_GET_V1(v)                                                  \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[1];                                          \
-  })
-#define HEXAGON_HVX_GET_P(v)                                                   \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_VectorPred P[2];                                                     \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.P[0];                                          \
-  })
-
-// Set HVX VectorPair macro.
-#define HEXAGON_HVX_PUT_W(v, new) (new)
-
-// Set HVX Vector macros.
-#define HEXAGON_HVX_PUT_V0(v, new)                                             \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[0] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-#define HEXAGON_HVX_PUT_V1(v, new)                                             \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.V[1] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-#define HEXAGON_HVX_PUT_P(v, new)                                              \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_VectorPred P[2];                                                     \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.W = (v);                                       \
-    _HEXAGON_HVX_internal_union.P[0] = (new);                                  \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
-
-
-#define HEXAGON_HVX_CREATE_W(v1, v0)                                           \
-  __extension__({                                                              \
-    union {                                                                    \
-      HVX_VectorPair W;                                                        \
-      HVX_Vector V[2];                                                         \
-    } _HEXAGON_HVX_internal_union;                                             \
-    _HEXAGON_HVX_internal_union.V[0] = (v0);                                   \
-    _HEXAGON_HVX_internal_union.V[1] = (v1);                                   \
-    _HEXAGON_HVX_internal_union.W;                                             \
-  })
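-
-/* Illustrative sketch (not part of the original header): combining two HVX
-   vectors into a pair and pulling the halves back out.  v_hi and v_lo are
-   placeholder HVX_Vector values.
-
-       HVX_VectorPair w = HEXAGON_HVX_CREATE_W(v_hi, v_lo);
-       HVX_Vector lo = HEXAGON_HVX_GET_V0(w);   // low half of the pair
-       w = HEXAGON_HVX_PUT_V1(w, v_hi);         // replace the high half
-*/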
-
-#ifdef __cplusplus
-
-class HVX_Vect {
-public:
-  // Constructors.
-  // Default.
-  HVX_Vect() : data(Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero())){};
-
-  // Custom constructors.
-  HVX_Vect(HVX_VectorPair W) : data(W){};
-  HVX_Vect(HVX_Vector v1, HVX_Vector v0) : data(HEXAGON_HVX_CREATE_W(v1, v0)){};
-
-  // Copy constructor.
-  HVX_Vect(const HVX_Vect &W) = default;
-
-  // Move constructor.
-  HVX_Vect(HVX_Vect &&W) = default;
-
-  // Assignment operator.
-  HVX_Vect &operator=(const HVX_Vect &W) = default;
-
-  operator HVX_VectorPair() { return data; };
-
-  // Extract VectorPair method.
-  HVX_VectorPair W(void) { return HEXAGON_HVX_GET_W(data); };
-
-  // Extract Vector methods.
-  HVX_Vector V0(void) { return HEXAGON_HVX_GET_V0(data); };
-  HVX_Vector V1(void) { return HEXAGON_HVX_GET_V1(data); };
-  HVX_VectorPred P(void) { return HEXAGON_HVX_GET_P(data); };
-
-  // NOTE: All set methods return a HVX_Vect type.
-  // Set HVX VectorPair method.
-  HVX_Vect W(HVX_VectorPair w) { return HVX_Vect(HEXAGON_HVX_PUT_W(data, w)); };
-
-  // Set HVX Vector methods.
-  HVX_Vect V0(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V0(data, v)); };
-  HVX_Vect V1(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V1(data, v)); };
-  HVX_Vect P(HVX_VectorPred p) { return HVX_Vect(HEXAGON_HVX_PUT_P(data, p)); };
-
-private:
-  HVX_VectorPair data;
-};
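-
-/* Illustrative sketch (not part of the original header): HVX_Vect follows
-   the same copy-on-set convention as the classes above.
-
-       HVX_Vect pair(Q6_V_vsplat_R(1), Q6_V_vzero());   // V1 = splat(1), V0 = zero
-       HVX_Vector v0 = pair.V0();
-       HVX_Vect updated = pair.V1(Q6_V_vzero());        // copy with V1 cleared
-*/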
-
-#endif /* __cplusplus */
-#endif /* __HVX__ */
-
-#define HEXAGON_UDMA_DM0_STATUS_IDLE             0x00000000
-#define HEXAGON_UDMA_DM0_STATUS_RUN              0x00000001
-#define HEXAGON_UDMA_DM0_STATUS_ERROR            0x00000002
-#define HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE      0
-#define HEXAGON_UDMA_DESC_DSTATE_COMPLETE        1
-#define HEXAGON_UDMA_DESC_ORDER_NOORDER          0
-#define HEXAGON_UDMA_DESC_ORDER_ORDER            1
-#define HEXAGON_UDMA_DESC_BYPASS_OFF             0
-#define HEXAGON_UDMA_DESC_BYPASS_ON              1
-#define HEXAGON_UDMA_DESC_COMP_NONE              0
-#define HEXAGON_UDMA_DESC_COMP_DLBC              1
-#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE0         0
-#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE1         1
-
-typedef struct hexagon_udma_descriptor_type0_s
-{
-    void *next;
-    unsigned int length:24;
-    unsigned int desctype:2;
-    unsigned int dstcomp:1;
-    unsigned int srccomp:1;
-    unsigned int dstbypass:1;
-    unsigned int srcbypass:1;
-    unsigned int order:1;
-    unsigned int dstate:1;
-    void *src;
-    void *dst;
-} hexagon_udma_descriptor_type0_t;
-
-typedef struct hexagon_udma_descriptor_type1_s
-{
-    void *next;
-    unsigned int length:24;
-    unsigned int desctype:2;
-    unsigned int dstcomp:1;
-    unsigned int srccomp:1;
-    unsigned int dstbypass:1;
-    unsigned int srcbypass:1;
-    unsigned int order:1;
-    unsigned int dstate:1;
-    void *src;
-    void *dst;
-    unsigned int allocation:28;
-    unsigned int padding:4;
-    unsigned int roiwidth:16;
-    unsigned int roiheight:16;
-    unsigned int srcstride:16;
-    unsigned int dststride:16;
-    unsigned int srcwidthoffset:16;
-    unsigned int dstwidthoffset:16;
-} hexagon_udma_descriptor_type1_t;
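-
-/* Illustrative sketch (not part of the original header): filling a type 0
-   descriptor with the HEXAGON_UDMA_* constants.  src_buf and dst_buf are
-   placeholder buffers, and the field meanings are inferred from the constant
-   names above.
-
-       hexagon_udma_descriptor_type0_t desc = {
-           .next      = 0,                                  // end of chain (assumed)
-           .length    = 256,                                // bytes to move
-           .desctype  = HEXAGON_UDMA_DESC_DESCTYPE_TYPE0,
-           .dstcomp   = HEXAGON_UDMA_DESC_COMP_NONE,
-           .srccomp   = HEXAGON_UDMA_DESC_COMP_NONE,
-           .dstbypass = HEXAGON_UDMA_DESC_BYPASS_OFF,
-           .srcbypass = HEXAGON_UDMA_DESC_BYPASS_OFF,
-           .order     = HEXAGON_UDMA_DESC_ORDER_NOORDER,
-           .dstate    = HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE,
-           .src       = src_buf,
-           .dst       = dst_buf,
-       };
-*/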
-
-#endif /* !HEXAGON_TYPES_H */
diff --git a/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
deleted file mode 100644
index 41ce7a6..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ /dev/null
@@ -1,4392 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Automatically generated file, do not edit!
-//===----------------------------------------------------------------------===//
-
-
-
-#ifndef _HVX_HEXAGON_PROTOS_H_
-#define _HVX_HEXAGON_PROTOS_H_ 1
-
-#ifdef __HVX__
-#if __HVX_LENGTH__ == 128
-#define __BUILTIN_VECTOR_WRAP(a) a ## _128B
-#else
-#define __BUILTIN_VECTOR_WRAP(a) a
-#endif
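-
-// Illustrative note (not part of the original header): with a 128-byte HVX
-// context, __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw) token-pastes to
-// __builtin_HEXAGON_V6_vaddw_128B; with 64-byte HVX it expands unchanged.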
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Rd32=vextract(Vu32,Rs32)
-   C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs)
-   Instruction Type:      LD
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=hi(Vss32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=lo(Vss32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
-#endif /* __HVX_ARCH__ >= 60 */
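-
-/* Illustrative sketch (not part of the original header): splatting a scalar
-   across a vector and extracting a word lane with the intrinsics above.
-
-       HVX_Vector v = Q6_V_vsplat_R(0x01020304);   // every word = 0x01020304
-       int w0 = Q6_R_vextract_VR(v, 0);            // word selected by Rs
-*/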
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=and(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=and(Qs4,!Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=not(Qs4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=or(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=or(Qs4,!Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vsetq(Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=xor(Qs4,Qt4)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) vmem(Rt32+#s4)=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) vmem(Rt32+#s4):nt=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) vmem(Rt32+#s4):nt=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) vmem(Rt32+#s4)=Vs32
-   C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
-   Instruction Type:      CVI_VM_ST
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
-#endif /* __HVX_ARCH__ >= 60 */
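-
-/* Illustrative sketch (not part of the original header): a predicated store
-   writes only the lanes enabled by the predicate.  dst is a placeholder
-   HVX_Vector pointer and v a placeholder vector.
-
-       HVX_VectorPred q = Q6_Q_vsetq_R(16);   // enable the first 16 byte lanes
-       Q6_vmem_QRIV(q, dst, v);               // store v where q is set
-*/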
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vabsdiff(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vabsdiff(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vabs(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vabs(Vu32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vabs(Vu32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vabs(Vu32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vadd(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vadd(Vuu32.b,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.b+=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.b+=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
-#endif /* __HVX_ARCH__ >= 60 */
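-
-/* Illustrative sketch (not part of the original header): conditional
-   accumulate adds only in the lanes enabled by the predicate; q, vx and vu
-   are placeholders.
-
-       HVX_Vector vx2 = Q6_Vb_condacc_QVbVb(q, vx, vu);   // vx.b += vu.b where q
-*/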
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vuu32.h,Vvv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.h+=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.h+=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vadd(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vuu32.w,Vvv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.w+=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.w+=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
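Each wrapper above expands to a single HVX instruction; the :sat forms clamp each lane instead of wrapping on overflow. As a minimal usage sketch, assuming a Hexagon toolchain with HVX enabled (for example hexagon-clang with -mhvx) and the SDK's hexagon_types.h for HVX_Vector and Word32; saturating_add_w is an illustrative name, not part of this header:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Per-lane saturating 32-bit add: Vd32.w=vadd(Vu32.w,Vv32.w):sat */
static inline HVX_Vector saturating_add_w(HVX_Vector a, HVX_Vector b) {
  return Q6_Vw_vadd_VwVw_sat(a, b);
}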
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=valign(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=valign(Vu32,Vv32,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32|=vand(Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vand(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vand(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
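The vand forms above convert between vector predicates (Q registers) and ordinary vectors, since predicates cannot be loaded or stored directly. A round-trip sketch under the same toolchain assumptions as the earlier sketch; byte_mask_roundtrip and the 0x01010101 mask value are illustrative:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Vector -> predicate -> vector: Qd4=vand(Vu32,Rt32) sets predicate
   bits wherever v has a nonzero masked byte; Vd32=vand(Qu4,Rt32) then
   expands the predicate back into 0x01/0x00 bytes. */
static inline HVX_Vector byte_mask_roundtrip(HVX_Vector v) {
  HVX_VectorPred q = Q6_Q_vand_VR(v, 0x01010101);
  return Q6_V_vand_QR(q, 0x01010101);
}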
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasl(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasl(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasl(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vasl(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasl(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasr(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vasr(Vu32.w,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vasr(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
-#endif /* __HVX_ARCH__ >= 60 */
-
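The three-operand vasr forms are the narrowing step of fixed-point pipelines: each shifts word lanes right by Rt8, optionally rounds, saturates to the narrower type, and packs both input vectors into one result. A sketch under the same assumptions; narrow_w_to_h is an illustrative name:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat -- shift right, round,
   saturate to 16 bits, and pack the two word vectors into halfwords. */
static inline HVX_Vector narrow_w_to_h(HVX_Vector a, HVX_Vector b,
                                       Word32 shift) {
  return Q6_Vh_vasr_VwVwR_rnd_sat(a, b, shift);
}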
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=Vu32
-   C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=Vuu32
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vavg(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vavg(Vu32.h,Vv32.h):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vavg(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vavg(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vavg(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vavg(Vu32.w,Vv32.w):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
-#endif /* __HVX_ARCH__ >= 60 */
-
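The vavg forms compute per-lane averages without an overflowing intermediate sum; the :rnd variants add one before halving. A sketch, with blend_ub as an illustrative name, e.g. for mixing two rows of unsigned 8-bit pixels:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd -- (a + b + 1) >> 1 per lane. */
static inline HVX_Vector blend_ub(HVX_Vector a, HVX_Vector b) {
  return Q6_Vub_vavg_VubVub_rnd(a, b);
}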
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vcl0(Vu32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vcl0(Vu32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vcombine(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=#0
-   C Intrinsic Prototype: HVX_Vector Q6_V_vzero()
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vdeal(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vdeale(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vdeal(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vdeal(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vdelta(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vdmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vdmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vdmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
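The vdmpy family performs dual multiplies, pairing adjacent lanes against coefficients packed in a scalar or a second vector, and the acc forms add the result into the destination in place. A running filter-accumulation sketch under the same assumptions; dot_step and coeff_pair are illustrative:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat -- each word lane accumulates the
   saturated sum of a halfword pair multiplied by the two halfword
   coefficients packed in coeff_pair. */
static inline HVX_Vector dot_step(HVX_Vector acc, HVX_Vector samples,
                                  Word32 coeff_pair) {
  return Q6_Vw_vdmpyacc_VwVhRh_sat(acc, samples, coeff_pair);
}

Q6_V_vzero(), defined earlier in this header, is the natural way to seed acc before the first step.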
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.eq(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
-#endif /* __HVX_ARCH__ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
-#endif /* __HVX_ARCH__ >= 60 */
-
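Each vcmp form yields a per-lane predicate, and the &=, |=, ^= variants fold a new comparison into an existing predicate, so multi-condition tests stay in Q registers. Predicates then steer conditional operations such as the condacc forms earlier in this header. A combined sketch under the same assumptions; add_where_gt is an illustrative name:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Accumulate v into acc only in word lanes where v > threshold:
   Qd4=vcmp.gt(Vu32.w,Vv32.w), then if (Qv4) Vx32.w+=Vu32.w. */
static inline HVX_Vector add_where_gt(HVX_Vector acc, HVX_Vector v,
                                      HVX_Vector threshold) {
  HVX_VectorPred q = Q6_Q_vcmp_gt_VwVw(v, threshold);
  return Q6_Vw_condacc_QVwVw(q, acc, v);
}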
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w=vinsert(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vlalign(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vlalign(Vu32,Vv32,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
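vlalign is the left-aligning counterpart to valign: per the HVX manual (semantics assumed here), the result is the last Rt bytes of Vv followed by the first VLEN-Rt bytes of Vu, the usual way to stitch an unaligned stream out of two aligned loads. A minimal sketch under that assumption:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Sliding window over two aligned loads: the low `off` bytes come from the
   tail of prev, the rest from the head of cur (assumed byte order). */
static inline HVX_Vector window(HVX_Vector cur, HVX_Vector prev, Word32 off)
{
    return Q6_V_vlalign_VVR(cur, prev, off);
}
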
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vlsr(Vu32.uh,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vlsr(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vlsr(Vu32.uw,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vlsr(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
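The vlsr forms are plain logical right shifts, by a common scalar amount or by a per-lane amount taken from Vv. A minimal sketch, same toolchain assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Scale every unsigned-word lane down by 2^shift (zero-filling). */
static inline HVX_Vector scale_down(HVX_Vector counts, Word32 shift)
{
    return Q6_Vuw_vlsr_VuwR(counts, shift);
}
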
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
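Each vlut32 pass matches only a 32-entry slice of the index space, which is what the or-accumulating form is for: a full 256-entry byte table takes eight passes. A sketch of that idiom, assuming the Rt operand selects the active slice as described in the HVX manual:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* 256-entry byte lookup built from eight 32-entry passes; each pass
   contributes the lanes whose index falls in its slice, ORed into r. */
static inline HVX_Vector lut256(HVX_Vector idx, const HVX_Vector tbl[8])
{
    HVX_Vector r = Q6_Vb_vlut32_VbVbR(idx, tbl[0], 0);
    for (int i = 1; i < 8; ++i)
        r = Q6_Vb_vlut32or_VbVbVbR(r, idx, tbl[i], i);
    return r;
}
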
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmax(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vmax(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vmax(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmax(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmin(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vmin(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vmin(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmin(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
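vmax and vmin compose directly into a per-lane clamp. A minimal sketch for signed halfword lanes, same assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Clamp each signed halfword lane of v into [lo, hi]. */
static inline HVX_Vector clamp_h(HVX_Vector v, HVX_Vector lo, HVX_Vector hi)
{
    return Q6_Vh_vmin_VhVh(Q6_Vh_vmax_VhVh(v, lo), hi);
}
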
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpa(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpa(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
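The vmpa forms dot pairs of elements against pairs of coefficients and widen, so a two-tap byte filter is one accumulating instruction per vector pair. A sketch, assuming the two signed-byte taps are packed into the low bytes of Rt as the HVX manual describes:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* One step of a two-tap filter: acc.h += dot(pixel byte pairs, taps). */
static inline HVX_VectorPair filter_step(HVX_VectorPair acc,
                                         HVX_VectorPair pixels, Word32 taps)
{
    return Q6_Wh_vmpaacc_WhWubRb(acc, pixels, taps);
}
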
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpye(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Rt32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpy(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
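The <<1:rnd:sat variant is the usual Q15 fractional multiply: per lane it computes sat16((Vu*Vv*2 + 0x8000) >> 16), rounding the doubled product down to the high halfword. A minimal fixed-point gain sketch, same assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Apply a Q15 gain to each signed halfword sample. */
static inline HVX_Vector q15_scale(HVX_Vector samples, HVX_Vector gain)
{
    return Q6_Vh_vmpy_VhVh_s1_rnd_sat(samples, gain);
}
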
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyieo(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyie(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyie(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyie(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpyi(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vmpyi(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vmpyi(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vmpyi(Vu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyio(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmux(Qt4,Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
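vmux is the lane select that pairs with the vcmp predicates above: true lanes take Vu, false lanes take Vv. A minimal sketch, same assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Per signed-word lane: v > t ? a : b. */
static inline HVX_Vector select_gt(HVX_Vector v, HVX_Vector t,
                                   HVX_Vector a, HVX_Vector b)
{
    return Q6_V_vmux_QVV(Q6_Q_vcmp_gt_VwVw(v, t), a, b);
}
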
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vnavg(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vnavg(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vnavg(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vnormamt(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vnormamt(Vu32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vnot(Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vor(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpacke(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpacke(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpack(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vpack(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vpacko(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpacko(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpack(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vpack(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
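The vpack family narrows two vectors into one: vpacke/vpacko keep even or odd sub-elements, while the :sat forms saturate each lane to the narrower type. A sketch of a saturating word-to-halfword narrow, assuming Vv supplies the low half of the result and Vu the high half:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Narrow two word vectors into one vector of saturated unsigned halfwords
   (operand-to-half mapping assumed as noted above). */
static inline HVX_Vector narrow_uh(HVX_Vector hi_words, HVX_Vector lo_words)
{
    return Q6_Vuh_vpack_VwVw_sat(hi_words, lo_words);
}
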
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vpopcount(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vrdelta(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vrmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
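vrmpy reduces groups of four byte products into each word lane, so dotting against an all-ones weight word sums every group of four unsigned bytes; the accumulating form chains that across a row. A minimal sketch (n >= 1 assumed), same toolchain assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Per word lane: running sum of groups of four unsigned bytes over row[]. */
static inline HVX_Vector byte_sums(const HVX_Vector *row, int n)
{
    HVX_Vector acc = Q6_Vuw_vrmpy_VubRub(row[0], 0x01010101);
    for (int i = 1; i < n; ++i)
        acc = Q6_Vuw_vrmpyacc_VuwVubRub(acc, row[i], 0x01010101);
    return acc;
}
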
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vror(Vu32,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vround(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vround(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vround(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vround(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
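The vround forms are the usual tail of a widening pipeline: they narrow two vectors of the wider type back to one vector of the narrower type with round-to-nearest and saturation. A minimal sketch, same assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Round two word accumulators back to one vector of saturated halfwords. */
static inline HVX_Vector round_narrow(HVX_Vector acc_hi, HVX_Vector acc_lo)
{
    return Q6_Vh_vround_VwVw_sat(acc_hi, acc_lo);
}
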
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsat(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsat(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsxt(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsxt(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
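
A minimal usage sketch for the widening sign-extensions above (an illustration, not part of the upstream header): Q6_Wh_vsxt_Vb promotes a vector of bytes to a pair of halfword vectors, and the Q6_V_lo_W/Q6_V_hi_W accessors defined elsewhere in this header split the pair. Assumes a Hexagon build with HVX enabled so HVX_Vector and HVX_VectorPair are defined.

static inline void widen_signed_bytes(HVX_Vector bytes,
                                      HVX_Vector *lo_halves,
                                      HVX_Vector *hi_halves) {
  HVX_VectorPair wide = Q6_Wh_vsxt_Vb(bytes); /* each byte sign-extended to a halfword */
  *lo_halves = Q6_V_lo_W(wide);               /* low vector of the pair */
  *hi_halves = Q6_V_hi_W(wide);               /* high vector of the pair */
}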
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuffe(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuff(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuffe(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuff(Vu32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vshuffo(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vshuff(Vu32,Vv32,Rt8)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vshuffoe(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vshuffoe(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vshuffo(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
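
The vshuffe/vshuffo pair above is the usual way to deinterleave element streams: vshuffe gathers the even-numbered lanes of both inputs, vshuffo the odd-numbered ones. A sketch under the same HVX build assumptions (the function name is illustrative):

static inline void deinterleave_h(HVX_Vector v1, HVX_Vector v0,
                                  HVX_Vector *evens, HVX_Vector *odds) {
  *evens = Q6_Vh_vshuffe_VhVh(v1, v0); /* even lanes of both inputs */
  *odds  = Q6_Vh_vshuffo_VhVh(v1, v0); /* odd lanes of both inputs */
}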
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsub(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vsub(Vuu32.b,Vvv32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.b-=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.b-=Vu32.b
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsub(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vuu32.h,Vvv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.h-=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.h-=Vu32.h
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsub(Vu32.h,Vv32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
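
The :sat forms above clamp to the element's range instead of wrapping on underflow, which matters for pixel and audio arithmetic. A small sketch using only the macro shown:

static inline HVX_Vector sat_diff_h(HVX_Vector a, HVX_Vector b) {
  /* lanewise a - b, clamped to [-32768, 32767] rather than wrapping */
  return Q6_Vh_vsub_VhVh_sat(a, b);
}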
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vsub(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vuu32.w,Vvv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (!Qv4) Vx32.w-=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       if (Qv4) Vx32.w-=Vu32.w
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
-#endif /* __HEXAGON_ARCH___ >= 60 */
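
The conditional forms above subtract only in lanes selected by a predicate; the Qn variants use the complement. A sketch, assuming Q6_Q_vcmp_gt_VwVw from the comparison section of this header:

static inline HVX_Vector cond_sub_w(HVX_Vector x, HVX_Vector a,
                                    HVX_Vector b, HVX_Vector step) {
  HVX_VectorPred q = Q6_Q_vcmp_gt_VwVw(a, b); /* lanes where a > b */
  return Q6_Vw_condnac_QVwVw(q, x, step);     /* x -= step in those lanes */
}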
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vswap(Qt4,Vu32,Vv32)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vtmpy(Vuu32.b,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vtmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vunpack(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vunpack(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vunpacko(Vu32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w|=vunpacko(Vu32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vunpack(Vu32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vunpack(Vu32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vxor(Vu32,Vv32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uh=vzxt(Vu32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 60
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vzxt(Vu32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
-#endif /* __HEXAGON_ARCH___ >= 60 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vsplat(Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4=vsetq2(Rt32)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4.b=vshuffe(Qs4.h,Qt4.h)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Qd4.h=vshuffe(Qs4.w,Qt4.w)
-   C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vadd(Vu32.b,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
-#endif /* __HEXAGON_ARCH___ >= 62 */
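
Q6_Vw_vadd_VwVwQ_carry reads and writes the carry predicate through Qx, so chaining two calls builds 64-bit lane additions out of 32-bit halves. A sketch; the all-false carry-in comes from comparing a vector with itself (Q6_Q_vcmp_gt_VwVw is assumed from elsewhere in this header), and the names are illustrative:

static inline void add64_lanes(HVX_Vector a_lo, HVX_Vector a_hi,
                               HVX_Vector b_lo, HVX_Vector b_hi,
                               HVX_Vector *s_lo, HVX_Vector *s_hi) {
  HVX_VectorPred carry = Q6_Q_vcmp_gt_VwVw(a_lo, a_lo); /* x > x: all false */
  *s_lo = Q6_Vw_vadd_VwVwQ_carry(a_lo, b_lo, &carry);   /* produces carry-out */
  *s_hi = Q6_Vw_vadd_VwVwQ_carry(a_hi, b_hi, &carry);   /* consumes it */
}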
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vadd(vclb(Vu32.h),Vv32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(vclb(Vu32.w),Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vadd(Vu32.h,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vadd(Vu32.ub,Vv32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vadd(Vu32.ub,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vadd(Vu32.uh,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(!Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32|=vand(!Qu4,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
-   Instruction Type:      CVI_VX_LATE
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(!Qv4,Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vand(Qv4,Vu32)
-   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
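
vand(Qv,Vu) zeroes the lanes where the predicate is clear, and the !Qv form zeroes the complement, so OR-ing the two gives a lanewise select. A sketch (Q6_V_vor_VV is assumed from the logical-ops section of this header; the single vmux instruction is the more direct way to select):

static inline HVX_Vector select_by_q(HVX_VectorPred q,
                                     HVX_Vector a, HVX_Vector b) {
  return Q6_V_vor_VV(Q6_V_vand_QV(q, a),   /* a where q is set   */
                     Q6_V_vand_QnV(q, b)); /* b where q is clear */
}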
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vlsr(Vu32.ub,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
-#endif /* __HEXAGON_ARCH___ >= 62 */
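
vlut16 looks up halfword table entries by byte index, one segment of the 256-entry index space per pass; the #u3 immediate names which segment the table vector serves, and the |= (oracci) form merges further passes into the same destination. A single-segment sketch, hedged as an illustration of the call shape only:

static inline HVX_VectorPair lut16_first_segment(HVX_Vector idx,
                                                 HVX_Vector table) {
  /* covers the lowest index segment only; higher indices would be
     filled in by Q6_Wh_vlut16or_WhVbVhI passes with #u3 = 1, 2, ... */
  return Q6_Wh_vlut16_VbVhI(idx, table, 0);
}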
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vmax(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vmin(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=vmpa(Vuu32.uh,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=vmpye(Vu32.w,Vv32.uh)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.ub)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vxx32+=vmpyo(Vu32.w,Vv32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vround(Vu32.uh,Vv32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vround(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vsat(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vsub(Vu32.b,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vsub(Vu32.ub,Vv32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_VA_DV
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vabs(Vu32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vabs(Vu32.b):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vasl(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h+=vasr(Vu32.h,Rt32)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vavg(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vavg(Vu32.b,Vv32.b):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vavg(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
-#endif /* __HEXAGON_ARCH___ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vdd32=#0
-   C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero()
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
-   C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
-   C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
-   C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_GATHER_DV
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
-   C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
-   Instruction Type:      CVI_GATHER_DV
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
-   C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
-   C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
-   Instruction Type:      CVI_GATHER
-   Execution Slots:       SLOT01
-   ========================================================================== */
-
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=vlut4(Vu32.uh,Rtt32.h)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT2
-   ========================================================================== */
-
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=vmpy(Vu32.h,Rt32.h)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vmpye(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=vnavg(Vu32.b,Vv32.b)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.b=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.h=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=prefixsum(Qv4)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.h).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.h).h+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER_DV
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.w).w=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       vscatter(Rt32,Mu2,Vv32.w).w+=Vw32
-   C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 65
-/* ==========================================================================
-   Assembly Syntax:       if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32
-   C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
-   Instruction Type:      CVI_SCATTER
-   Execution Slots:       SLOT0
-   ========================================================================== */
-
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
-#endif /* __HVX_ARCH__ >= 65 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w=vasrinto(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VP_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.uw=vrotr(Vu32.uw,Vv32.uw)
-   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VS
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 66
-/* ==========================================================================
-   Assembly Syntax:       Vd32.w=vsatdw(Vu32.w,Vv32.w)
-   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VA
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
-#endif /* __HVX_ARCH__ >= 66 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#if __HVX_ARCH__ >= 68
-/* ==========================================================================
-   Assembly Syntax:       Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
-   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
-   Instruction Type:      CVI_VX_DV
-   Execution Slots:       SLOT23
-   ========================================================================== */
-
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
-#endif /* __HVX_ARCH__ >= 68 */
-
-#endif /* __HVX__ */
-
-#endif
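The hunk above removes the 14.0.2 copy of the Hexagon HVX intrinsic wrappers; each Q6_* macro simply forwards to the matching __builtin_HEXAGON_V6_* builtin under an __HVX_ARCH__ guard. A minimal usage sketch follows, assuming a Hexagon toolchain with HVX enabled (e.g. -mhvx) and the usual hexagon_types.h / hvx_hexagon_protos.h include names; neither assumption is established by this diff itself.

/* Sketch: rounding byte average and a zeroed vector pair, using the v65
 * intrinsics documented above. HVX_Vector/HVX_VectorPair and the include
 * names are assumptions about the surrounding Hexagon support headers. */
#include <hexagon_types.h>      /* HVX_Vector, HVX_VectorPair (assumed) */
#include <hvx_hexagon_protos.h> /* Q6_* wrapper macros (assumed) */

#if defined(__HVX__) && __HVX_ARCH__ >= 65
static HVX_Vector average_bytes(HVX_Vector a, HVX_Vector b) {
  /* Vd32.b = vavg(Vu32.b,Vv32.b):rnd -- per-byte average with rounding */
  return Q6_Vb_vavg_VbVb_rnd(a, b);
}

static HVX_VectorPair zero_pair(void) {
  /* Vdd32 = #0 -- a MAPPING instruction that zeroes a whole vector pair */
  return Q6_W_vzero();
}
#endif /* __HVX__ && __HVX_ARCH__ >= 65 */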
diff --git a/linux-x86/lib64/clang/14.0.2/include/limits.h b/linux-x86/lib64/clang/14.0.2/include/limits.h
deleted file mode 100644
index c653580..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/limits.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*===---- limits.h - Standard header for integer sizes --------------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __CLANG_LIMITS_H
-#define __CLANG_LIMITS_H
-
-/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
-   Avert this #include_next madness. */
-#if defined __GNUC__ && !defined _GCC_LIMITS_H_
-#define _GCC_LIMITS_H_
-#endif
-
-/* System headers include a number of constants from POSIX in <limits.h>.
-   Include it if we're hosted. */
-#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
-#include_next <limits.h>
-#endif
-
-/* Many system headers try to "help us out" by defining these.  No really, we
-   know how big each datatype is. */
-#undef  SCHAR_MIN
-#undef  SCHAR_MAX
-#undef  UCHAR_MAX
-#undef  SHRT_MIN
-#undef  SHRT_MAX
-#undef  USHRT_MAX
-#undef  INT_MIN
-#undef  INT_MAX
-#undef  UINT_MAX
-#undef  LONG_MIN
-#undef  LONG_MAX
-#undef  ULONG_MAX
-
-#undef  CHAR_BIT
-#undef  CHAR_MIN
-#undef  CHAR_MAX
-
-/* C90/99 5.2.4.2.1 */
-#define SCHAR_MAX __SCHAR_MAX__
-#define SHRT_MAX  __SHRT_MAX__
-#define INT_MAX   __INT_MAX__
-#define LONG_MAX  __LONG_MAX__
-
-#define SCHAR_MIN (-__SCHAR_MAX__-1)
-#define SHRT_MIN  (-__SHRT_MAX__ -1)
-#define INT_MIN   (-__INT_MAX__  -1)
-#define LONG_MIN  (-__LONG_MAX__ -1L)
-
-#define UCHAR_MAX (__SCHAR_MAX__*2  +1)
-#define USHRT_MAX (__SHRT_MAX__ *2  +1)
-#define UINT_MAX  (__INT_MAX__  *2U +1U)
-#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
-
-#ifndef MB_LEN_MAX
-#define MB_LEN_MAX 1
-#endif
-
-#define CHAR_BIT  __CHAR_BIT__
-
-#ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
-#define CHAR_MIN 0
-#define CHAR_MAX UCHAR_MAX
-#else
-#define CHAR_MIN SCHAR_MIN
-#define CHAR_MAX __SCHAR_MAX__
-#endif
-
-/* C99 5.2.4.2.1: Added long long.
-   C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
- */
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
-
-#undef  LLONG_MIN
-#undef  LLONG_MAX
-#undef  ULLONG_MAX
-
-#define LLONG_MAX  __LONG_LONG_MAX__
-#define LLONG_MIN  (-__LONG_LONG_MAX__-1LL)
-#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
-#endif
-
-/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension.  It's too bad
-   that we don't have something like #pragma poison that could be used to
-   deprecate a macro - the code should just use LLONG_MAX and friends.
- */
-#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
-
-#undef   LONG_LONG_MIN
-#undef   LONG_LONG_MAX
-#undef   ULONG_LONG_MAX
-
-#define LONG_LONG_MAX  __LONG_LONG_MAX__
-#define LONG_LONG_MIN  (-__LONG_LONG_MAX__-1LL)
-#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
-#endif
-
-#endif /* __CLANG_LIMITS_H */
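The deleted limits.h derives every limit from compiler-provided macros (__SCHAR_MAX__, __INT_MAX__, and so on) rather than hard-coding values; the unsigned maxima fall out of the identity unsigned_max = signed_max * 2 + 1, which is exact because each unsigned type has one more value bit than its signed counterpart. A small self-check of that derivation (ordinary hosted C, not part of the header):

/* Self-check of the derivations used above: each unsigned maximum is the
 * corresponding signed maximum times two plus one, and CHAR_MAX tracks
 * whether char is signed (the __CHAR_UNSIGNED__ branch in the header). */
#include <limits.h>
#include <assert.h>

int main(void) {
  assert(UCHAR_MAX == SCHAR_MAX * 2 + 1);            /* 255 == 127*2 + 1 */
  assert(USHRT_MAX == SHRT_MAX * 2 + 1);
  assert(UINT_MAX  == INT_MAX * 2U + 1U);
  assert(CHAR_MAX  == (CHAR_MIN == 0 ? UCHAR_MAX : SCHAR_MAX));
  return 0;
}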
diff --git a/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h
deleted file mode 100644
index 9c81ddb..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ /dev/null
@@ -1,775 +0,0 @@
-//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_BASE_H_
-#define _OPENCL_BASE_H_
-
-// Define extension macros
-
-#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
-// For SPIR and SPIR-V all extensions are supported.
-#if defined(__SPIR__) || defined(__SPIRV__)
-#define cl_khr_subgroup_extended_types 1
-#define cl_khr_subgroup_non_uniform_vote 1
-#define cl_khr_subgroup_ballot 1
-#define cl_khr_subgroup_non_uniform_arithmetic 1
-#define cl_khr_subgroup_shuffle 1
-#define cl_khr_subgroup_shuffle_relative 1
-#define cl_khr_subgroup_clustered_reduce 1
-#define cl_khr_extended_bit_ops 1
-#define cl_khr_integer_dot_product 1
-#define __opencl_c_integer_dot_product_input_4x8bit 1
-#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
-#define cl_ext_float_atomics 1
-#ifdef cl_khr_fp16
-#define __opencl_c_ext_fp16_global_atomic_load_store 1
-#define __opencl_c_ext_fp16_local_atomic_load_store 1
-#define __opencl_c_ext_fp16_global_atomic_add 1
-#define __opencl_c_ext_fp16_local_atomic_add 1
-#define __opencl_c_ext_fp16_global_atomic_min_max 1
-#define __opencl_c_ext_fp16_local_atomic_min_max 1
-#endif
-#ifdef cl_khr_fp64
-#define __opencl_c_ext_fp64_global_atomic_add 1
-#define __opencl_c_ext_fp64_local_atomic_add 1
-#define __opencl_c_ext_fp64_global_atomic_min_max 1
-#define __opencl_c_ext_fp64_local_atomic_min_max 1
-#endif
-#define __opencl_c_ext_fp32_global_atomic_add 1
-#define __opencl_c_ext_fp32_local_atomic_add 1
-#define __opencl_c_ext_fp32_global_atomic_min_max 1
-#define __opencl_c_ext_fp32_local_atomic_min_max 1
-
-#endif // defined(__SPIR__) || defined(__SPIRV__)
-#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
-
-// Define feature macros for OpenCL C 2.0
-#if (__OPENCL_CPP_VERSION__ == 100 || __OPENCL_C_VERSION__ == 200)
-#define __opencl_c_pipes 1
-#define __opencl_c_generic_address_space 1
-#define __opencl_c_work_group_collective_functions 1
-#define __opencl_c_atomic_order_acq_rel 1
-#define __opencl_c_atomic_order_seq_cst 1
-#define __opencl_c_atomic_scope_device 1
-#define __opencl_c_atomic_scope_all_devices 1
-#define __opencl_c_device_enqueue 1
-#define __opencl_c_read_write_images 1
-#define __opencl_c_program_scope_global_variables 1
-#define __opencl_c_images 1
-#endif
-
-// Define header-only feature macros for OpenCL C 3.0.
-#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
-// For the SPIR and SPIR-V target all features are supported.
-#if defined(__SPIR__) || defined(__SPIRV__)
-#define __opencl_c_atomic_scope_all_devices 1
-#endif // defined(__SPIR__) || defined(__SPIRV__)
-#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
-
-// built-in scalar data types:
-
-/**
- * An unsigned 8-bit integer.
- */
-typedef unsigned char uchar;
-
-/**
- * An unsigned 16-bit integer.
- */
-typedef unsigned short ushort;
-
-/**
- * An unsigned 32-bit integer.
- */
-typedef unsigned int uint;
-
-/**
- * An unsigned 64-bit integer.
- */
-typedef unsigned long ulong;
-
-/**
- * The unsigned integer type of the result of the sizeof operator. This
- * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS,
- * defined in table 4.3, is 32 bits, and a 64-bit unsigned integer if
- * CL_DEVICE_ADDRESS_BITS is 64 bits.
- */
-typedef __SIZE_TYPE__ size_t;
-
-/**
- * A signed integer type that is the result of subtracting two pointers.
- * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS,
- * defined in table 4.3, is 32 bits, and a 64-bit signed integer if
- * CL_DEVICE_ADDRESS_BITS is 64 bits.
- */
-typedef __PTRDIFF_TYPE__ ptrdiff_t;
-
-/**
- * A signed integer type with the property that any valid pointer to
- * void can be converted to this type, then converted back to pointer
- * to void, and the result will compare equal to the original pointer.
- */
-typedef __INTPTR_TYPE__ intptr_t;
-
-/**
- * An unsigned integer type with the property that any valid pointer to
- * void can be converted to this type, then converted back to pointer
- * to void, and the result will compare equal to the original pointer.
- */
-typedef __UINTPTR_TYPE__ uintptr_t;
-
-// built-in vector data types:
-typedef char char2 __attribute__((ext_vector_type(2)));
-typedef char char3 __attribute__((ext_vector_type(3)));
-typedef char char4 __attribute__((ext_vector_type(4)));
-typedef char char8 __attribute__((ext_vector_type(8)));
-typedef char char16 __attribute__((ext_vector_type(16)));
-typedef uchar uchar2 __attribute__((ext_vector_type(2)));
-typedef uchar uchar3 __attribute__((ext_vector_type(3)));
-typedef uchar uchar4 __attribute__((ext_vector_type(4)));
-typedef uchar uchar8 __attribute__((ext_vector_type(8)));
-typedef uchar uchar16 __attribute__((ext_vector_type(16)));
-typedef short short2 __attribute__((ext_vector_type(2)));
-typedef short short3 __attribute__((ext_vector_type(3)));
-typedef short short4 __attribute__((ext_vector_type(4)));
-typedef short short8 __attribute__((ext_vector_type(8)));
-typedef short short16 __attribute__((ext_vector_type(16)));
-typedef ushort ushort2 __attribute__((ext_vector_type(2)));
-typedef ushort ushort3 __attribute__((ext_vector_type(3)));
-typedef ushort ushort4 __attribute__((ext_vector_type(4)));
-typedef ushort ushort8 __attribute__((ext_vector_type(8)));
-typedef ushort ushort16 __attribute__((ext_vector_type(16)));
-typedef int int2 __attribute__((ext_vector_type(2)));
-typedef int int3 __attribute__((ext_vector_type(3)));
-typedef int int4 __attribute__((ext_vector_type(4)));
-typedef int int8 __attribute__((ext_vector_type(8)));
-typedef int int16 __attribute__((ext_vector_type(16)));
-typedef uint uint2 __attribute__((ext_vector_type(2)));
-typedef uint uint3 __attribute__((ext_vector_type(3)));
-typedef uint uint4 __attribute__((ext_vector_type(4)));
-typedef uint uint8 __attribute__((ext_vector_type(8)));
-typedef uint uint16 __attribute__((ext_vector_type(16)));
-typedef long long2 __attribute__((ext_vector_type(2)));
-typedef long long3 __attribute__((ext_vector_type(3)));
-typedef long long4 __attribute__((ext_vector_type(4)));
-typedef long long8 __attribute__((ext_vector_type(8)));
-typedef long long16 __attribute__((ext_vector_type(16)));
-typedef ulong ulong2 __attribute__((ext_vector_type(2)));
-typedef ulong ulong3 __attribute__((ext_vector_type(3)));
-typedef ulong ulong4 __attribute__((ext_vector_type(4)));
-typedef ulong ulong8 __attribute__((ext_vector_type(8)));
-typedef ulong ulong16 __attribute__((ext_vector_type(16)));
-typedef float float2 __attribute__((ext_vector_type(2)));
-typedef float float3 __attribute__((ext_vector_type(3)));
-typedef float float4 __attribute__((ext_vector_type(4)));
-typedef float float8 __attribute__((ext_vector_type(8)));
-typedef float float16 __attribute__((ext_vector_type(16)));
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-typedef half half2 __attribute__((ext_vector_type(2)));
-typedef half half3 __attribute__((ext_vector_type(3)));
-typedef half half4 __attribute__((ext_vector_type(4)));
-typedef half half8 __attribute__((ext_vector_type(8)));
-typedef half half16 __attribute__((ext_vector_type(16)));
-#endif
-#ifdef cl_khr_fp64
-#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-#endif
-typedef double double2 __attribute__((ext_vector_type(2)));
-typedef double double3 __attribute__((ext_vector_type(3)));
-typedef double double4 __attribute__((ext_vector_type(4)));
-typedef double double8 __attribute__((ext_vector_type(8)));
-typedef double double16 __attribute__((ext_vector_type(16)));
-#endif
-
-#if defined(__OPENCL_CPP_VERSION__)
-#define NULL nullptr
-#elif defined(__OPENCL_C_VERSION__)
-#define NULL ((void*)0)
-#endif
-
-/**
- * Value of maximum non-infinite single-precision floating-point
- * number.
- */
-#define MAXFLOAT 0x1.fffffep127f
-
-/**
- * A positive float constant expression. HUGE_VALF evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VALF (__builtin_huge_valf())
-
-/**
- * A positive double constant expression. HUGE_VAL evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VAL (__builtin_huge_val())
-
-/**
- * A constant expression of type float representing positive or
- * unsigned infinity.
- */
-#define INFINITY (__builtin_inff())
-
-/**
- * A constant expression of type float representing a quiet NaN.
- */
-#define NAN as_float(INT_MAX)
-
-#define FP_ILOGB0    INT_MIN
-#define FP_ILOGBNAN  INT_MAX
-
-#define FLT_DIG 6
-#define FLT_MANT_DIG 24
-#define FLT_MAX_10_EXP +38
-#define FLT_MAX_EXP +128
-#define FLT_MIN_10_EXP -37
-#define FLT_MIN_EXP -125
-#define FLT_RADIX 2
-#define FLT_MAX 0x1.fffffep127f
-#define FLT_MIN 0x1.0p-126f
-#define FLT_EPSILON 0x1.0p-23f
-
-#define M_E_F         2.71828182845904523536028747135266250f
-#define M_LOG2E_F     1.44269504088896340735992468100189214f
-#define M_LOG10E_F    0.434294481903251827651128918916605082f
-#define M_LN2_F       0.693147180559945309417232121458176568f
-#define M_LN10_F      2.30258509299404568401799145468436421f
-#define M_PI_F        3.14159265358979323846264338327950288f
-#define M_PI_2_F      1.57079632679489661923132169163975144f
-#define M_PI_4_F      0.785398163397448309615660845819875721f
-#define M_1_PI_F      0.318309886183790671537767526745028724f
-#define M_2_PI_F      0.636619772367581343075535053490057448f
-#define M_2_SQRTPI_F  1.12837916709551257389615890312154517f
-#define M_SQRT2_F     1.41421356237309504880168872420969808f
-#define M_SQRT1_2_F   0.707106781186547524400844362104849039f
-
-#define DBL_DIG 15
-#define DBL_MANT_DIG 53
-#define DBL_MAX_10_EXP +308
-#define DBL_MAX_EXP +1024
-#define DBL_MIN_10_EXP -307
-#define DBL_MIN_EXP -1021
-#define DBL_RADIX 2
-#define DBL_MAX 0x1.fffffffffffffp1023
-#define DBL_MIN 0x1.0p-1022
-#define DBL_EPSILON 0x1.0p-52
-
-#define M_E           0x1.5bf0a8b145769p+1
-#define M_LOG2E       0x1.71547652b82fep+0
-#define M_LOG10E      0x1.bcb7b1526e50ep-2
-#define M_LN2         0x1.62e42fefa39efp-1
-#define M_LN10        0x1.26bb1bbb55516p+1
-#define M_PI          0x1.921fb54442d18p+1
-#define M_PI_2        0x1.921fb54442d18p+0
-#define M_PI_4        0x1.921fb54442d18p-1
-#define M_1_PI        0x1.45f306dc9c883p-2
-#define M_2_PI        0x1.45f306dc9c883p-1
-#define M_2_SQRTPI    0x1.20dd750429b6dp+0
-#define M_SQRT2       0x1.6a09e667f3bcdp+0
-#define M_SQRT1_2     0x1.6a09e667f3bcdp-1
-
-#ifdef cl_khr_fp16
-
-#define HALF_DIG 3
-#define HALF_MANT_DIG 11
-#define HALF_MAX_10_EXP +4
-#define HALF_MAX_EXP +16
-#define HALF_MIN_10_EXP -4
-#define HALF_MIN_EXP -13
-#define HALF_RADIX 2
-#define HALF_MAX ((0x1.ffcp15h))
-#define HALF_MIN ((0x1.0p-14h))
-#define HALF_EPSILON ((0x1.0p-10h))
-
-#define M_E_H         2.71828182845904523536028747135266250h
-#define M_LOG2E_H     1.44269504088896340735992468100189214h
-#define M_LOG10E_H    0.434294481903251827651128918916605082h
-#define M_LN2_H       0.693147180559945309417232121458176568h
-#define M_LN10_H      2.30258509299404568401799145468436421h
-#define M_PI_H        3.14159265358979323846264338327950288h
-#define M_PI_2_H      1.57079632679489661923132169163975144h
-#define M_PI_4_H      0.785398163397448309615660845819875721h
-#define M_1_PI_H      0.318309886183790671537767526745028724h
-#define M_2_PI_H      0.636619772367581343075535053490057448h
-#define M_2_SQRTPI_H  1.12837916709551257389615890312154517h
-#define M_SQRT2_H     1.41421356237309504880168872420969808h
-#define M_SQRT1_2_H   0.707106781186547524400844362104849039h
-
-#endif //cl_khr_fp16
-
-#define CHAR_BIT  8
-#define SCHAR_MAX 127
-#define SCHAR_MIN (-128)
-#define UCHAR_MAX 255
-#define CHAR_MAX  SCHAR_MAX
-#define CHAR_MIN  SCHAR_MIN
-#define USHRT_MAX 65535
-#define SHRT_MAX  32767
-#define SHRT_MIN  (-32768)
-#define UINT_MAX  0xffffffff
-#define INT_MAX   2147483647
-#define INT_MIN   (-2147483647-1)
-#define ULONG_MAX 0xffffffffffffffffUL
-#define LONG_MAX  0x7fffffffffffffffL
-#define LONG_MIN  (-0x7fffffffffffffffL-1)
-
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
-typedef uint cl_mem_fence_flags;
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to local memory
- */
-#define CLK_LOCAL_MEM_FENCE    0x01
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to global memory
- */
-#define CLK_GLOBAL_MEM_FENCE   0x02
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-typedef enum memory_scope {
-  memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
-  memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
-  memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
-#if defined(__opencl_c_atomic_scope_all_devices)
-  memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-  memory_scope_all_devices = memory_scope_all_svm_devices,
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif // defined(__opencl_c_atomic_scope_all_devices)
-/**
- * Subgroups have different requirements on forward progress, so just test
- * all the relevant macros.
- * CL 3.0 sub-groups: "they are not guaranteed to make independent forward progress"
- * KHR subgroups: "Subgroups within a workgroup are independent, make forward
- * progress with respect to each other"
- */
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-  memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
-#endif
-} memory_scope;
-
-/**
- * Queue a memory fence to ensure correct ordering of memory
- * operations between work-items of a work-group to
- * image memory.
- */
-#define CLK_IMAGE_MEM_FENCE  0x04
-
-#ifndef ATOMIC_VAR_INIT
-#define ATOMIC_VAR_INIT(x) (x)
-#endif //ATOMIC_VAR_INIT
-#define ATOMIC_FLAG_INIT 0
-
-// enum values aligned with what clang uses in EmitAtomicExpr()
-typedef enum memory_order
-{
-  memory_order_relaxed = __ATOMIC_RELAXED,
-  memory_order_acquire = __ATOMIC_ACQUIRE,
-  memory_order_release = __ATOMIC_RELEASE,
-  memory_order_acq_rel = __ATOMIC_ACQ_REL,
-#if defined(__opencl_c_atomic_order_seq_cst)
-  memory_order_seq_cst = __ATOMIC_SEQ_CST
-#endif
-} memory_order;
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-// These values need to match the runtime equivalent
-//
-// Addressing Mode.
-//
-#define CLK_ADDRESS_NONE                0
-#define CLK_ADDRESS_CLAMP_TO_EDGE       2
-#define CLK_ADDRESS_CLAMP               4
-#define CLK_ADDRESS_REPEAT              6
-#define CLK_ADDRESS_MIRRORED_REPEAT     8
-
-//
-// Coordinate Normalization
-//
-#define CLK_NORMALIZED_COORDS_FALSE     0
-#define CLK_NORMALIZED_COORDS_TRUE      1
-
-//
-// Filtering Mode.
-//
-#define CLK_FILTER_NEAREST              0x10
-#define CLK_FILTER_LINEAR               0x20
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-//
-// Channel Datatype.
-//
-#define CLK_SNORM_INT8        0x10D0
-#define CLK_SNORM_INT16       0x10D1
-#define CLK_UNORM_INT8        0x10D2
-#define CLK_UNORM_INT16       0x10D3
-#define CLK_UNORM_SHORT_565   0x10D4
-#define CLK_UNORM_SHORT_555   0x10D5
-#define CLK_UNORM_INT_101010  0x10D6
-#define CLK_SIGNED_INT8       0x10D7
-#define CLK_SIGNED_INT16      0x10D8
-#define CLK_SIGNED_INT32      0x10D9
-#define CLK_UNSIGNED_INT8     0x10DA
-#define CLK_UNSIGNED_INT16    0x10DB
-#define CLK_UNSIGNED_INT32    0x10DC
-#define CLK_HALF_FLOAT        0x10DD
-#define CLK_FLOAT             0x10DE
-#define CLK_UNORM_INT24       0x10DF
-
-// Channel order, numbering must be aligned with cl_channel_order in cl.h
-//
-#define CLK_R         0x10B0
-#define CLK_A         0x10B1
-#define CLK_RG        0x10B2
-#define CLK_RA        0x10B3
-#define CLK_RGB       0x10B4
-#define CLK_RGBA      0x10B5
-#define CLK_BGRA      0x10B6
-#define CLK_ARGB      0x10B7
-#define CLK_INTENSITY 0x10B8
-#define CLK_LUMINANCE 0x10B9
-#define CLK_Rx                0x10BA
-#define CLK_RGx               0x10BB
-#define CLK_RGBx              0x10BC
-#define CLK_DEPTH             0x10BD
-#define CLK_DEPTH_STENCIL     0x10BE
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define CLK_sRGB              0x10BF
-#define CLK_sRGBx             0x10C0
-#define CLK_sRGBA             0x10C1
-#define CLK_sBGRA             0x10C2
-#define CLK_ABGR              0x10C3
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#define CL_COMPLETE                                 0x0
-#define CL_RUNNING                                  0x1
-#define CL_SUBMITTED                                0x2
-#define CL_QUEUED                                   0x3
-
-#define CLK_SUCCESS                                 0
-#define CLK_ENQUEUE_FAILURE                         -101
-#define CLK_INVALID_QUEUE                           -102
-#define CLK_INVALID_NDRANGE                         -160
-#define CLK_INVALID_EVENT_WAIT_LIST                 -57
-#define CLK_DEVICE_QUEUE_FULL                       -161
-#define CLK_INVALID_ARG_SIZE                        -51
-#define CLK_EVENT_ALLOCATION_FAILURE                -100
-#define CLK_OUT_OF_RESOURCES                        -5
-
-#define CLK_NULL_QUEUE                              0
-#define CLK_NULL_EVENT (__builtin_astype(((__SIZE_MAX__)), clk_event_t))
-
-// execution model related definitions
-#define CLK_ENQUEUE_FLAGS_NO_WAIT                   0x0
-#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL               0x1
-#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP           0x2
-
-typedef int kernel_enqueue_flags_t;
-typedef int clk_profiling_info;
-
-// Profiling info name (see capture_event_profiling_info)
-#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
-
-#define MAX_WORK_DIM 3
-
-typedef struct {
-  unsigned int workDimension;
-  size_t globalWorkOffset[MAX_WORK_DIM];
-  size_t globalWorkSize[MAX_WORK_DIM];
-  size_t localWorkSize[MAX_WORK_DIM];
-} ndrange_t;
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
- * Reinterprets a data type as another data type of the same size
- */
-#define as_char(x) __builtin_astype((x), char)
-#define as_char2(x) __builtin_astype((x), char2)
-#define as_char3(x) __builtin_astype((x), char3)
-#define as_char4(x) __builtin_astype((x), char4)
-#define as_char8(x) __builtin_astype((x), char8)
-#define as_char16(x) __builtin_astype((x), char16)
-
-#define as_uchar(x) __builtin_astype((x), uchar)
-#define as_uchar2(x) __builtin_astype((x), uchar2)
-#define as_uchar3(x) __builtin_astype((x), uchar3)
-#define as_uchar4(x) __builtin_astype((x), uchar4)
-#define as_uchar8(x) __builtin_astype((x), uchar8)
-#define as_uchar16(x) __builtin_astype((x), uchar16)
-
-#define as_short(x) __builtin_astype((x), short)
-#define as_short2(x) __builtin_astype((x), short2)
-#define as_short3(x) __builtin_astype((x), short3)
-#define as_short4(x) __builtin_astype((x), short4)
-#define as_short8(x) __builtin_astype((x), short8)
-#define as_short16(x) __builtin_astype((x), short16)
-
-#define as_ushort(x) __builtin_astype((x), ushort)
-#define as_ushort2(x) __builtin_astype((x), ushort2)
-#define as_ushort3(x) __builtin_astype((x), ushort3)
-#define as_ushort4(x) __builtin_astype((x), ushort4)
-#define as_ushort8(x) __builtin_astype((x), ushort8)
-#define as_ushort16(x) __builtin_astype((x), ushort16)
-
-#define as_int(x) __builtin_astype((x), int)
-#define as_int2(x) __builtin_astype((x), int2)
-#define as_int3(x) __builtin_astype((x), int3)
-#define as_int4(x) __builtin_astype((x), int4)
-#define as_int8(x) __builtin_astype((x), int8)
-#define as_int16(x) __builtin_astype((x), int16)
-
-#define as_uint(x) __builtin_astype((x), uint)
-#define as_uint2(x) __builtin_astype((x), uint2)
-#define as_uint3(x) __builtin_astype((x), uint3)
-#define as_uint4(x) __builtin_astype((x), uint4)
-#define as_uint8(x) __builtin_astype((x), uint8)
-#define as_uint16(x) __builtin_astype((x), uint16)
-
-#define as_long(x) __builtin_astype((x), long)
-#define as_long2(x) __builtin_astype((x), long2)
-#define as_long3(x) __builtin_astype((x), long3)
-#define as_long4(x) __builtin_astype((x), long4)
-#define as_long8(x) __builtin_astype((x), long8)
-#define as_long16(x) __builtin_astype((x), long16)
-
-#define as_ulong(x) __builtin_astype((x), ulong)
-#define as_ulong2(x) __builtin_astype((x), ulong2)
-#define as_ulong3(x) __builtin_astype((x), ulong3)
-#define as_ulong4(x) __builtin_astype((x), ulong4)
-#define as_ulong8(x) __builtin_astype((x), ulong8)
-#define as_ulong16(x) __builtin_astype((x), ulong16)
-
-#define as_float(x) __builtin_astype((x), float)
-#define as_float2(x) __builtin_astype((x), float2)
-#define as_float3(x) __builtin_astype((x), float3)
-#define as_float4(x) __builtin_astype((x), float4)
-#define as_float8(x) __builtin_astype((x), float8)
-#define as_float16(x) __builtin_astype((x), float16)
-
-#ifdef cl_khr_fp64
-#define as_double(x) __builtin_astype((x), double)
-#define as_double2(x) __builtin_astype((x), double2)
-#define as_double3(x) __builtin_astype((x), double3)
-#define as_double4(x) __builtin_astype((x), double4)
-#define as_double8(x) __builtin_astype((x), double8)
-#define as_double16(x) __builtin_astype((x), double16)
-#endif // cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#define as_half(x) __builtin_astype((x), half)
-#define as_half2(x) __builtin_astype((x), half2)
-#define as_half3(x) __builtin_astype((x), half3)
-#define as_half4(x) __builtin_astype((x), half4)
-#define as_half8(x) __builtin_astype((x), half8)
-#define as_half16(x) __builtin_astype((x), half16)
-#endif // cl_khr_fp16
-
-#define as_size_t(x) __builtin_astype((x), size_t)
-#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)
-#define as_intptr_t(x) __builtin_astype((x), intptr_t)
-#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
-
-// C++ for OpenCL - __remove_address_space
-#if defined(__OPENCL_CPP_VERSION__)
-template <typename _Tp> struct __remove_address_space { using type = _Tp; };
-template <typename _Tp> struct __remove_address_space<__generic _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__global _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__private _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__local _Tp> {
-  using type = _Tp;
-};
-template <typename _Tp> struct __remove_address_space<__constant _Tp> {
-  using type = _Tp;
-};
-#endif
-
-// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
-
-#define __kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
-#define kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-
-int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
-#endif
-
-#ifdef cl_intel_device_side_avc_motion_estimation
-
-#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
-#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
-
-#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
-#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
-#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
-#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
-
-#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
-#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
-#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
-#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
-#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
-#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
-#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
-
-#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
-#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
-#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
-
-#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
-#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
-#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
-#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
-#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
-#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
-#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
-
-#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
-#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
-
-#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
-#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
-#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
-
-#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
-#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
-#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
-#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
-
-#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
-#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
-#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
-#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
-#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
-
-#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
-#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
-#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
-#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
-
-#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
-#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
-#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
-
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
-
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
-
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
-
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
-
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
-
-#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
-#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
-#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
-
-#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
-#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
-
-#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-
-#endif // cl_intel_device_side_avc_motion_estimation
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#endif //_OPENCL_BASE_H_
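The as_type family removed above reinterprets a value's bits as another type of the same size via __builtin_astype; the header's own NAN is defined exactly this way, as as_float(INT_MAX). A plain-C analogue of that one case follows (memcpy stands in for __builtin_astype here; this is an illustration, not OpenCL code, and it assumes IEEE-754 binary32 floats):

/* Reinterpret the INT_MAX bit pattern (0x7FFFFFFF) as a float, mirroring
 * the header's `#define NAN as_float(INT_MAX)`: sign 0, exponent all ones,
 * non-zero mantissa -- a quiet NaN. */
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <assert.h>

int main(void) {
  uint32_t bits = INT32_MAX;     /* 0x7FFFFFFF, as in as_float(INT_MAX) */
  float f;
  memcpy(&f, &bits, sizeof f);   /* same-size bit copy, like as_float */
  assert(isnan(f));
  return 0;
}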
diff --git a/linux-x86/lib64/clang/14.0.2/include/opencl-c.h b/linux-x86/lib64/clang/14.0.2/include/opencl-c.h
deleted file mode 100644
index 32af848..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/opencl-c.h
+++ /dev/null
@@ -1,18498 +0,0 @@
-//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_H_
-#define _OPENCL_H_
-
-#include "opencl-c-base.h"
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifndef cl_khr_depth_images
-#define cl_khr_depth_images
-#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
-#ifdef cl_khr_3d_image_writes
-#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
-#endif //cl_khr_3d_image_writes
-#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-
-#if (defined(__OPENCL_CPP_VERSION__) ||                                        \
-     (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&                              \
-    (defined(__SPIR__) || defined(__SPIRV__))
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // (defined(__OPENCL_CPP_VERSION__) ||
-       //  (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
-       // (defined(__SPIR__) || defined(__SPIRV__))
-
-#define __ovld __attribute__((overloadable))
-#define __conv __attribute__((convergent))
-
-// Optimizations
-#define __purefn __attribute__((pure))
-#define __cnfn __attribute__((const))
-
-
-// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
-
-char __ovld __cnfn convert_char_rte(char);
-char __ovld __cnfn convert_char_sat_rte(char);
-char __ovld __cnfn convert_char_rtz(char);
-char __ovld __cnfn convert_char_sat_rtz(char);
-char __ovld __cnfn convert_char_rtp(char);
-char __ovld __cnfn convert_char_sat_rtp(char);
-char __ovld __cnfn convert_char_rtn(char);
-char __ovld __cnfn convert_char_sat_rtn(char);
-char __ovld __cnfn convert_char(char);
-char __ovld __cnfn convert_char_sat(char);
-char __ovld __cnfn convert_char_rte(uchar);
-char __ovld __cnfn convert_char_sat_rte(uchar);
-char __ovld __cnfn convert_char_rtz(uchar);
-char __ovld __cnfn convert_char_sat_rtz(uchar);
-char __ovld __cnfn convert_char_rtp(uchar);
-char __ovld __cnfn convert_char_sat_rtp(uchar);
-char __ovld __cnfn convert_char_rtn(uchar);
-char __ovld __cnfn convert_char_sat_rtn(uchar);
-char __ovld __cnfn convert_char(uchar);
-char __ovld __cnfn convert_char_sat(uchar);
-char __ovld __cnfn convert_char_rte(short);
-char __ovld __cnfn convert_char_sat_rte(short);
-char __ovld __cnfn convert_char_rtz(short);
-char __ovld __cnfn convert_char_sat_rtz(short);
-char __ovld __cnfn convert_char_rtp(short);
-char __ovld __cnfn convert_char_sat_rtp(short);
-char __ovld __cnfn convert_char_rtn(short);
-char __ovld __cnfn convert_char_sat_rtn(short);
-char __ovld __cnfn convert_char(short);
-char __ovld __cnfn convert_char_sat(short);
-char __ovld __cnfn convert_char_rte(ushort);
-char __ovld __cnfn convert_char_sat_rte(ushort);
-char __ovld __cnfn convert_char_rtz(ushort);
-char __ovld __cnfn convert_char_sat_rtz(ushort);
-char __ovld __cnfn convert_char_rtp(ushort);
-char __ovld __cnfn convert_char_sat_rtp(ushort);
-char __ovld __cnfn convert_char_rtn(ushort);
-char __ovld __cnfn convert_char_sat_rtn(ushort);
-char __ovld __cnfn convert_char(ushort);
-char __ovld __cnfn convert_char_sat(ushort);
-char __ovld __cnfn convert_char_rte(int);
-char __ovld __cnfn convert_char_sat_rte(int);
-char __ovld __cnfn convert_char_rtz(int);
-char __ovld __cnfn convert_char_sat_rtz(int);
-char __ovld __cnfn convert_char_rtp(int);
-char __ovld __cnfn convert_char_sat_rtp(int);
-char __ovld __cnfn convert_char_rtn(int);
-char __ovld __cnfn convert_char_sat_rtn(int);
-char __ovld __cnfn convert_char(int);
-char __ovld __cnfn convert_char_sat(int);
-char __ovld __cnfn convert_char_rte(uint);
-char __ovld __cnfn convert_char_sat_rte(uint);
-char __ovld __cnfn convert_char_rtz(uint);
-char __ovld __cnfn convert_char_sat_rtz(uint);
-char __ovld __cnfn convert_char_rtp(uint);
-char __ovld __cnfn convert_char_sat_rtp(uint);
-char __ovld __cnfn convert_char_rtn(uint);
-char __ovld __cnfn convert_char_sat_rtn(uint);
-char __ovld __cnfn convert_char(uint);
-char __ovld __cnfn convert_char_sat(uint);
-char __ovld __cnfn convert_char_rte(long);
-char __ovld __cnfn convert_char_sat_rte(long);
-char __ovld __cnfn convert_char_rtz(long);
-char __ovld __cnfn convert_char_sat_rtz(long);
-char __ovld __cnfn convert_char_rtp(long);
-char __ovld __cnfn convert_char_sat_rtp(long);
-char __ovld __cnfn convert_char_rtn(long);
-char __ovld __cnfn convert_char_sat_rtn(long);
-char __ovld __cnfn convert_char(long);
-char __ovld __cnfn convert_char_sat(long);
-char __ovld __cnfn convert_char_rte(ulong);
-char __ovld __cnfn convert_char_sat_rte(ulong);
-char __ovld __cnfn convert_char_rtz(ulong);
-char __ovld __cnfn convert_char_sat_rtz(ulong);
-char __ovld __cnfn convert_char_rtp(ulong);
-char __ovld __cnfn convert_char_sat_rtp(ulong);
-char __ovld __cnfn convert_char_rtn(ulong);
-char __ovld __cnfn convert_char_sat_rtn(ulong);
-char __ovld __cnfn convert_char(ulong);
-char __ovld __cnfn convert_char_sat(ulong);
-char __ovld __cnfn convert_char_rte(float);
-char __ovld __cnfn convert_char_sat_rte(float);
-char __ovld __cnfn convert_char_rtz(float);
-char __ovld __cnfn convert_char_sat_rtz(float);
-char __ovld __cnfn convert_char_rtp(float);
-char __ovld __cnfn convert_char_sat_rtp(float);
-char __ovld __cnfn convert_char_rtn(float);
-char __ovld __cnfn convert_char_sat_rtn(float);
-char __ovld __cnfn convert_char(float);
-char __ovld __cnfn convert_char_sat(float);
-uchar __ovld __cnfn convert_uchar_rte(char);
-uchar __ovld __cnfn convert_uchar_sat_rte(char);
-uchar __ovld __cnfn convert_uchar_rtz(char);
-uchar __ovld __cnfn convert_uchar_sat_rtz(char);
-uchar __ovld __cnfn convert_uchar_rtp(char);
-uchar __ovld __cnfn convert_uchar_sat_rtp(char);
-uchar __ovld __cnfn convert_uchar_rtn(char);
-uchar __ovld __cnfn convert_uchar_sat_rtn(char);
-uchar __ovld __cnfn convert_uchar(char);
-uchar __ovld __cnfn convert_uchar_sat(char);
-uchar __ovld __cnfn convert_uchar_rte(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
-uchar __ovld __cnfn convert_uchar_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_rtn(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
-uchar __ovld __cnfn convert_uchar(uchar);
-uchar __ovld __cnfn convert_uchar_sat(uchar);
-uchar __ovld __cnfn convert_uchar_rte(short);
-uchar __ovld __cnfn convert_uchar_sat_rte(short);
-uchar __ovld __cnfn convert_uchar_rtz(short);
-uchar __ovld __cnfn convert_uchar_sat_rtz(short);
-uchar __ovld __cnfn convert_uchar_rtp(short);
-uchar __ovld __cnfn convert_uchar_sat_rtp(short);
-uchar __ovld __cnfn convert_uchar_rtn(short);
-uchar __ovld __cnfn convert_uchar_sat_rtn(short);
-uchar __ovld __cnfn convert_uchar(short);
-uchar __ovld __cnfn convert_uchar_sat(short);
-uchar __ovld __cnfn convert_uchar_rte(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
-uchar __ovld __cnfn convert_uchar_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_rtn(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
-uchar __ovld __cnfn convert_uchar(ushort);
-uchar __ovld __cnfn convert_uchar_sat(ushort);
-uchar __ovld __cnfn convert_uchar_rte(int);
-uchar __ovld __cnfn convert_uchar_sat_rte(int);
-uchar __ovld __cnfn convert_uchar_rtz(int);
-uchar __ovld __cnfn convert_uchar_sat_rtz(int);
-uchar __ovld __cnfn convert_uchar_rtp(int);
-uchar __ovld __cnfn convert_uchar_sat_rtp(int);
-uchar __ovld __cnfn convert_uchar_rtn(int);
-uchar __ovld __cnfn convert_uchar_sat_rtn(int);
-uchar __ovld __cnfn convert_uchar(int);
-uchar __ovld __cnfn convert_uchar_sat(int);
-uchar __ovld __cnfn convert_uchar_rte(uint);
-uchar __ovld __cnfn convert_uchar_sat_rte(uint);
-uchar __ovld __cnfn convert_uchar_rtz(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
-uchar __ovld __cnfn convert_uchar_rtp(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
-uchar __ovld __cnfn convert_uchar_rtn(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
-uchar __ovld __cnfn convert_uchar(uint);
-uchar __ovld __cnfn convert_uchar_sat(uint);
-uchar __ovld __cnfn convert_uchar_rte(long);
-uchar __ovld __cnfn convert_uchar_sat_rte(long);
-uchar __ovld __cnfn convert_uchar_rtz(long);
-uchar __ovld __cnfn convert_uchar_sat_rtz(long);
-uchar __ovld __cnfn convert_uchar_rtp(long);
-uchar __ovld __cnfn convert_uchar_sat_rtp(long);
-uchar __ovld __cnfn convert_uchar_rtn(long);
-uchar __ovld __cnfn convert_uchar_sat_rtn(long);
-uchar __ovld __cnfn convert_uchar(long);
-uchar __ovld __cnfn convert_uchar_sat(long);
-uchar __ovld __cnfn convert_uchar_rte(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
-uchar __ovld __cnfn convert_uchar_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_rtn(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
-uchar __ovld __cnfn convert_uchar(ulong);
-uchar __ovld __cnfn convert_uchar_sat(ulong);
-uchar __ovld __cnfn convert_uchar_rte(float);
-uchar __ovld __cnfn convert_uchar_sat_rte(float);
-uchar __ovld __cnfn convert_uchar_rtz(float);
-uchar __ovld __cnfn convert_uchar_sat_rtz(float);
-uchar __ovld __cnfn convert_uchar_rtp(float);
-uchar __ovld __cnfn convert_uchar_sat_rtp(float);
-uchar __ovld __cnfn convert_uchar_rtn(float);
-uchar __ovld __cnfn convert_uchar_sat_rtn(float);
-uchar __ovld __cnfn convert_uchar(float);
-uchar __ovld __cnfn convert_uchar_sat(float);
-
-short __ovld __cnfn convert_short_rte(char);
-short __ovld __cnfn convert_short_sat_rte(char);
-short __ovld __cnfn convert_short_rtz(char);
-short __ovld __cnfn convert_short_sat_rtz(char);
-short __ovld __cnfn convert_short_rtp(char);
-short __ovld __cnfn convert_short_sat_rtp(char);
-short __ovld __cnfn convert_short_rtn(char);
-short __ovld __cnfn convert_short_sat_rtn(char);
-short __ovld __cnfn convert_short(char);
-short __ovld __cnfn convert_short_sat(char);
-short __ovld __cnfn convert_short_rte(uchar);
-short __ovld __cnfn convert_short_sat_rte(uchar);
-short __ovld __cnfn convert_short_rtz(uchar);
-short __ovld __cnfn convert_short_sat_rtz(uchar);
-short __ovld __cnfn convert_short_rtp(uchar);
-short __ovld __cnfn convert_short_sat_rtp(uchar);
-short __ovld __cnfn convert_short_rtn(uchar);
-short __ovld __cnfn convert_short_sat_rtn(uchar);
-short __ovld __cnfn convert_short(uchar);
-short __ovld __cnfn convert_short_sat(uchar);
-short __ovld __cnfn convert_short_rte(short);
-short __ovld __cnfn convert_short_sat_rte(short);
-short __ovld __cnfn convert_short_rtz(short);
-short __ovld __cnfn convert_short_sat_rtz(short);
-short __ovld __cnfn convert_short_rtp(short);
-short __ovld __cnfn convert_short_sat_rtp(short);
-short __ovld __cnfn convert_short_rtn(short);
-short __ovld __cnfn convert_short_sat_rtn(short);
-short __ovld __cnfn convert_short(short);
-short __ovld __cnfn convert_short_sat(short);
-short __ovld __cnfn convert_short_rte(ushort);
-short __ovld __cnfn convert_short_sat_rte(ushort);
-short __ovld __cnfn convert_short_rtz(ushort);
-short __ovld __cnfn convert_short_sat_rtz(ushort);
-short __ovld __cnfn convert_short_rtp(ushort);
-short __ovld __cnfn convert_short_sat_rtp(ushort);
-short __ovld __cnfn convert_short_rtn(ushort);
-short __ovld __cnfn convert_short_sat_rtn(ushort);
-short __ovld __cnfn convert_short(ushort);
-short __ovld __cnfn convert_short_sat(ushort);
-short __ovld __cnfn convert_short_rte(int);
-short __ovld __cnfn convert_short_sat_rte(int);
-short __ovld __cnfn convert_short_rtz(int);
-short __ovld __cnfn convert_short_sat_rtz(int);
-short __ovld __cnfn convert_short_rtp(int);
-short __ovld __cnfn convert_short_sat_rtp(int);
-short __ovld __cnfn convert_short_rtn(int);
-short __ovld __cnfn convert_short_sat_rtn(int);
-short __ovld __cnfn convert_short(int);
-short __ovld __cnfn convert_short_sat(int);
-short __ovld __cnfn convert_short_rte(uint);
-short __ovld __cnfn convert_short_sat_rte(uint);
-short __ovld __cnfn convert_short_rtz(uint);
-short __ovld __cnfn convert_short_sat_rtz(uint);
-short __ovld __cnfn convert_short_rtp(uint);
-short __ovld __cnfn convert_short_sat_rtp(uint);
-short __ovld __cnfn convert_short_rtn(uint);
-short __ovld __cnfn convert_short_sat_rtn(uint);
-short __ovld __cnfn convert_short(uint);
-short __ovld __cnfn convert_short_sat(uint);
-short __ovld __cnfn convert_short_rte(long);
-short __ovld __cnfn convert_short_sat_rte(long);
-short __ovld __cnfn convert_short_rtz(long);
-short __ovld __cnfn convert_short_sat_rtz(long);
-short __ovld __cnfn convert_short_rtp(long);
-short __ovld __cnfn convert_short_sat_rtp(long);
-short __ovld __cnfn convert_short_rtn(long);
-short __ovld __cnfn convert_short_sat_rtn(long);
-short __ovld __cnfn convert_short(long);
-short __ovld __cnfn convert_short_sat(long);
-short __ovld __cnfn convert_short_rte(ulong);
-short __ovld __cnfn convert_short_sat_rte(ulong);
-short __ovld __cnfn convert_short_rtz(ulong);
-short __ovld __cnfn convert_short_sat_rtz(ulong);
-short __ovld __cnfn convert_short_rtp(ulong);
-short __ovld __cnfn convert_short_sat_rtp(ulong);
-short __ovld __cnfn convert_short_rtn(ulong);
-short __ovld __cnfn convert_short_sat_rtn(ulong);
-short __ovld __cnfn convert_short(ulong);
-short __ovld __cnfn convert_short_sat(ulong);
-short __ovld __cnfn convert_short_rte(float);
-short __ovld __cnfn convert_short_sat_rte(float);
-short __ovld __cnfn convert_short_rtz(float);
-short __ovld __cnfn convert_short_sat_rtz(float);
-short __ovld __cnfn convert_short_rtp(float);
-short __ovld __cnfn convert_short_sat_rtp(float);
-short __ovld __cnfn convert_short_rtn(float);
-short __ovld __cnfn convert_short_sat_rtn(float);
-short __ovld __cnfn convert_short(float);
-short __ovld __cnfn convert_short_sat(float);
-ushort __ovld __cnfn convert_ushort_rte(char);
-ushort __ovld __cnfn convert_ushort_sat_rte(char);
-ushort __ovld __cnfn convert_ushort_rtz(char);
-ushort __ovld __cnfn convert_ushort_sat_rtz(char);
-ushort __ovld __cnfn convert_ushort_rtp(char);
-ushort __ovld __cnfn convert_ushort_sat_rtp(char);
-ushort __ovld __cnfn convert_ushort_rtn(char);
-ushort __ovld __cnfn convert_ushort_sat_rtn(char);
-ushort __ovld __cnfn convert_ushort(char);
-ushort __ovld __cnfn convert_ushort_sat(char);
-ushort __ovld __cnfn convert_ushort_rte(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
-ushort __ovld __cnfn convert_ushort_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_rtn(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
-ushort __ovld __cnfn convert_ushort(uchar);
-ushort __ovld __cnfn convert_ushort_sat(uchar);
-ushort __ovld __cnfn convert_ushort_rte(short);
-ushort __ovld __cnfn convert_ushort_sat_rte(short);
-ushort __ovld __cnfn convert_ushort_rtz(short);
-ushort __ovld __cnfn convert_ushort_sat_rtz(short);
-ushort __ovld __cnfn convert_ushort_rtp(short);
-ushort __ovld __cnfn convert_ushort_sat_rtp(short);
-ushort __ovld __cnfn convert_ushort_rtn(short);
-ushort __ovld __cnfn convert_ushort_sat_rtn(short);
-ushort __ovld __cnfn convert_ushort(short);
-ushort __ovld __cnfn convert_ushort_sat(short);
-ushort __ovld __cnfn convert_ushort_rte(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
-ushort __ovld __cnfn convert_ushort_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_rtn(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
-ushort __ovld __cnfn convert_ushort(ushort);
-ushort __ovld __cnfn convert_ushort_sat(ushort);
-ushort __ovld __cnfn convert_ushort_rte(int);
-ushort __ovld __cnfn convert_ushort_sat_rte(int);
-ushort __ovld __cnfn convert_ushort_rtz(int);
-ushort __ovld __cnfn convert_ushort_sat_rtz(int);
-ushort __ovld __cnfn convert_ushort_rtp(int);
-ushort __ovld __cnfn convert_ushort_sat_rtp(int);
-ushort __ovld __cnfn convert_ushort_rtn(int);
-ushort __ovld __cnfn convert_ushort_sat_rtn(int);
-ushort __ovld __cnfn convert_ushort(int);
-ushort __ovld __cnfn convert_ushort_sat(int);
-ushort __ovld __cnfn convert_ushort_rte(uint);
-ushort __ovld __cnfn convert_ushort_sat_rte(uint);
-ushort __ovld __cnfn convert_ushort_rtz(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
-ushort __ovld __cnfn convert_ushort_rtp(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
-ushort __ovld __cnfn convert_ushort_rtn(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
-ushort __ovld __cnfn convert_ushort(uint);
-ushort __ovld __cnfn convert_ushort_sat(uint);
-ushort __ovld __cnfn convert_ushort_rte(long);
-ushort __ovld __cnfn convert_ushort_sat_rte(long);
-ushort __ovld __cnfn convert_ushort_rtz(long);
-ushort __ovld __cnfn convert_ushort_sat_rtz(long);
-ushort __ovld __cnfn convert_ushort_rtp(long);
-ushort __ovld __cnfn convert_ushort_sat_rtp(long);
-ushort __ovld __cnfn convert_ushort_rtn(long);
-ushort __ovld __cnfn convert_ushort_sat_rtn(long);
-ushort __ovld __cnfn convert_ushort(long);
-ushort __ovld __cnfn convert_ushort_sat(long);
-ushort __ovld __cnfn convert_ushort_rte(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
-ushort __ovld __cnfn convert_ushort_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_rtn(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
-ushort __ovld __cnfn convert_ushort(ulong);
-ushort __ovld __cnfn convert_ushort_sat(ulong);
-ushort __ovld __cnfn convert_ushort_rte(float);
-ushort __ovld __cnfn convert_ushort_sat_rte(float);
-ushort __ovld __cnfn convert_ushort_rtz(float);
-ushort __ovld __cnfn convert_ushort_sat_rtz(float);
-ushort __ovld __cnfn convert_ushort_rtp(float);
-ushort __ovld __cnfn convert_ushort_sat_rtp(float);
-ushort __ovld __cnfn convert_ushort_rtn(float);
-ushort __ovld __cnfn convert_ushort_sat_rtn(float);
-ushort __ovld __cnfn convert_ushort(float);
-ushort __ovld __cnfn convert_ushort_sat(float);
-int __ovld __cnfn convert_int_rte(char);
-int __ovld __cnfn convert_int_sat_rte(char);
-int __ovld __cnfn convert_int_rtz(char);
-int __ovld __cnfn convert_int_sat_rtz(char);
-int __ovld __cnfn convert_int_rtp(char);
-int __ovld __cnfn convert_int_sat_rtp(char);
-int __ovld __cnfn convert_int_rtn(char);
-int __ovld __cnfn convert_int_sat_rtn(char);
-int __ovld __cnfn convert_int(char);
-int __ovld __cnfn convert_int_sat(char);
-int __ovld __cnfn convert_int_rte(uchar);
-int __ovld __cnfn convert_int_sat_rte(uchar);
-int __ovld __cnfn convert_int_rtz(uchar);
-int __ovld __cnfn convert_int_sat_rtz(uchar);
-int __ovld __cnfn convert_int_rtp(uchar);
-int __ovld __cnfn convert_int_sat_rtp(uchar);
-int __ovld __cnfn convert_int_rtn(uchar);
-int __ovld __cnfn convert_int_sat_rtn(uchar);
-int __ovld __cnfn convert_int(uchar);
-int __ovld __cnfn convert_int_sat(uchar);
-int __ovld __cnfn convert_int_rte(short);
-int __ovld __cnfn convert_int_sat_rte(short);
-int __ovld __cnfn convert_int_rtz(short);
-int __ovld __cnfn convert_int_sat_rtz(short);
-int __ovld __cnfn convert_int_rtp(short);
-int __ovld __cnfn convert_int_sat_rtp(short);
-int __ovld __cnfn convert_int_rtn(short);
-int __ovld __cnfn convert_int_sat_rtn(short);
-int __ovld __cnfn convert_int(short);
-int __ovld __cnfn convert_int_sat(short);
-int __ovld __cnfn convert_int_rte(ushort);
-int __ovld __cnfn convert_int_sat_rte(ushort);
-int __ovld __cnfn convert_int_rtz(ushort);
-int __ovld __cnfn convert_int_sat_rtz(ushort);
-int __ovld __cnfn convert_int_rtp(ushort);
-int __ovld __cnfn convert_int_sat_rtp(ushort);
-int __ovld __cnfn convert_int_rtn(ushort);
-int __ovld __cnfn convert_int_sat_rtn(ushort);
-int __ovld __cnfn convert_int(ushort);
-int __ovld __cnfn convert_int_sat(ushort);
-int __ovld __cnfn convert_int_rte(int);
-int __ovld __cnfn convert_int_sat_rte(int);
-int __ovld __cnfn convert_int_rtz(int);
-int __ovld __cnfn convert_int_sat_rtz(int);
-int __ovld __cnfn convert_int_rtp(int);
-int __ovld __cnfn convert_int_sat_rtp(int);
-int __ovld __cnfn convert_int_rtn(int);
-int __ovld __cnfn convert_int_sat_rtn(int);
-int __ovld __cnfn convert_int(int);
-int __ovld __cnfn convert_int_sat(int);
-int __ovld __cnfn convert_int_rte(uint);
-int __ovld __cnfn convert_int_sat_rte(uint);
-int __ovld __cnfn convert_int_rtz(uint);
-int __ovld __cnfn convert_int_sat_rtz(uint);
-int __ovld __cnfn convert_int_rtp(uint);
-int __ovld __cnfn convert_int_sat_rtp(uint);
-int __ovld __cnfn convert_int_rtn(uint);
-int __ovld __cnfn convert_int_sat_rtn(uint);
-int __ovld __cnfn convert_int(uint);
-int __ovld __cnfn convert_int_sat(uint);
-int __ovld __cnfn convert_int_rte(long);
-int __ovld __cnfn convert_int_sat_rte(long);
-int __ovld __cnfn convert_int_rtz(long);
-int __ovld __cnfn convert_int_sat_rtz(long);
-int __ovld __cnfn convert_int_rtp(long);
-int __ovld __cnfn convert_int_sat_rtp(long);
-int __ovld __cnfn convert_int_rtn(long);
-int __ovld __cnfn convert_int_sat_rtn(long);
-int __ovld __cnfn convert_int(long);
-int __ovld __cnfn convert_int_sat(long);
-int __ovld __cnfn convert_int_rte(ulong);
-int __ovld __cnfn convert_int_sat_rte(ulong);
-int __ovld __cnfn convert_int_rtz(ulong);
-int __ovld __cnfn convert_int_sat_rtz(ulong);
-int __ovld __cnfn convert_int_rtp(ulong);
-int __ovld __cnfn convert_int_sat_rtp(ulong);
-int __ovld __cnfn convert_int_rtn(ulong);
-int __ovld __cnfn convert_int_sat_rtn(ulong);
-int __ovld __cnfn convert_int(ulong);
-int __ovld __cnfn convert_int_sat(ulong);
-int __ovld __cnfn convert_int_rte(float);
-int __ovld __cnfn convert_int_sat_rte(float);
-int __ovld __cnfn convert_int_rtz(float);
-int __ovld __cnfn convert_int_sat_rtz(float);
-int __ovld __cnfn convert_int_rtp(float);
-int __ovld __cnfn convert_int_sat_rtp(float);
-int __ovld __cnfn convert_int_rtn(float);
-int __ovld __cnfn convert_int_sat_rtn(float);
-int __ovld __cnfn convert_int(float);
-int __ovld __cnfn convert_int_sat(float);
-uint __ovld __cnfn convert_uint_rte(char);
-uint __ovld __cnfn convert_uint_sat_rte(char);
-uint __ovld __cnfn convert_uint_rtz(char);
-uint __ovld __cnfn convert_uint_sat_rtz(char);
-uint __ovld __cnfn convert_uint_rtp(char);
-uint __ovld __cnfn convert_uint_sat_rtp(char);
-uint __ovld __cnfn convert_uint_rtn(char);
-uint __ovld __cnfn convert_uint_sat_rtn(char);
-uint __ovld __cnfn convert_uint(char);
-uint __ovld __cnfn convert_uint_sat(char);
-uint __ovld __cnfn convert_uint_rte(uchar);
-uint __ovld __cnfn convert_uint_sat_rte(uchar);
-uint __ovld __cnfn convert_uint_rtz(uchar);
-uint __ovld __cnfn convert_uint_sat_rtz(uchar);
-uint __ovld __cnfn convert_uint_rtp(uchar);
-uint __ovld __cnfn convert_uint_sat_rtp(uchar);
-uint __ovld __cnfn convert_uint_rtn(uchar);
-uint __ovld __cnfn convert_uint_sat_rtn(uchar);
-uint __ovld __cnfn convert_uint(uchar);
-uint __ovld __cnfn convert_uint_sat(uchar);
-uint __ovld __cnfn convert_uint_rte(short);
-uint __ovld __cnfn convert_uint_sat_rte(short);
-uint __ovld __cnfn convert_uint_rtz(short);
-uint __ovld __cnfn convert_uint_sat_rtz(short);
-uint __ovld __cnfn convert_uint_rtp(short);
-uint __ovld __cnfn convert_uint_sat_rtp(short);
-uint __ovld __cnfn convert_uint_rtn(short);
-uint __ovld __cnfn convert_uint_sat_rtn(short);
-uint __ovld __cnfn convert_uint(short);
-uint __ovld __cnfn convert_uint_sat(short);
-uint __ovld __cnfn convert_uint_rte(ushort);
-uint __ovld __cnfn convert_uint_sat_rte(ushort);
-uint __ovld __cnfn convert_uint_rtz(ushort);
-uint __ovld __cnfn convert_uint_sat_rtz(ushort);
-uint __ovld __cnfn convert_uint_rtp(ushort);
-uint __ovld __cnfn convert_uint_sat_rtp(ushort);
-uint __ovld __cnfn convert_uint_rtn(ushort);
-uint __ovld __cnfn convert_uint_sat_rtn(ushort);
-uint __ovld __cnfn convert_uint(ushort);
-uint __ovld __cnfn convert_uint_sat(ushort);
-uint __ovld __cnfn convert_uint_rte(int);
-uint __ovld __cnfn convert_uint_sat_rte(int);
-uint __ovld __cnfn convert_uint_rtz(int);
-uint __ovld __cnfn convert_uint_sat_rtz(int);
-uint __ovld __cnfn convert_uint_rtp(int);
-uint __ovld __cnfn convert_uint_sat_rtp(int);
-uint __ovld __cnfn convert_uint_rtn(int);
-uint __ovld __cnfn convert_uint_sat_rtn(int);
-uint __ovld __cnfn convert_uint(int);
-uint __ovld __cnfn convert_uint_sat(int);
-uint __ovld __cnfn convert_uint_rte(uint);
-uint __ovld __cnfn convert_uint_sat_rte(uint);
-uint __ovld __cnfn convert_uint_rtz(uint);
-uint __ovld __cnfn convert_uint_sat_rtz(uint);
-uint __ovld __cnfn convert_uint_rtp(uint);
-uint __ovld __cnfn convert_uint_sat_rtp(uint);
-uint __ovld __cnfn convert_uint_rtn(uint);
-uint __ovld __cnfn convert_uint_sat_rtn(uint);
-uint __ovld __cnfn convert_uint(uint);
-uint __ovld __cnfn convert_uint_sat(uint);
-uint __ovld __cnfn convert_uint_rte(long);
-uint __ovld __cnfn convert_uint_sat_rte(long);
-uint __ovld __cnfn convert_uint_rtz(long);
-uint __ovld __cnfn convert_uint_sat_rtz(long);
-uint __ovld __cnfn convert_uint_rtp(long);
-uint __ovld __cnfn convert_uint_sat_rtp(long);
-uint __ovld __cnfn convert_uint_rtn(long);
-uint __ovld __cnfn convert_uint_sat_rtn(long);
-uint __ovld __cnfn convert_uint(long);
-uint __ovld __cnfn convert_uint_sat(long);
-uint __ovld __cnfn convert_uint_rte(ulong);
-uint __ovld __cnfn convert_uint_sat_rte(ulong);
-uint __ovld __cnfn convert_uint_rtz(ulong);
-uint __ovld __cnfn convert_uint_sat_rtz(ulong);
-uint __ovld __cnfn convert_uint_rtp(ulong);
-uint __ovld __cnfn convert_uint_sat_rtp(ulong);
-uint __ovld __cnfn convert_uint_rtn(ulong);
-uint __ovld __cnfn convert_uint_sat_rtn(ulong);
-uint __ovld __cnfn convert_uint(ulong);
-uint __ovld __cnfn convert_uint_sat(ulong);
-uint __ovld __cnfn convert_uint_rte(float);
-uint __ovld __cnfn convert_uint_sat_rte(float);
-uint __ovld __cnfn convert_uint_rtz(float);
-uint __ovld __cnfn convert_uint_sat_rtz(float);
-uint __ovld __cnfn convert_uint_rtp(float);
-uint __ovld __cnfn convert_uint_sat_rtp(float);
-uint __ovld __cnfn convert_uint_rtn(float);
-uint __ovld __cnfn convert_uint_sat_rtn(float);
-uint __ovld __cnfn convert_uint(float);
-uint __ovld __cnfn convert_uint_sat(float);
-long __ovld __cnfn convert_long_rte(char);
-long __ovld __cnfn convert_long_sat_rte(char);
-long __ovld __cnfn convert_long_rtz(char);
-long __ovld __cnfn convert_long_sat_rtz(char);
-long __ovld __cnfn convert_long_rtp(char);
-long __ovld __cnfn convert_long_sat_rtp(char);
-long __ovld __cnfn convert_long_rtn(char);
-long __ovld __cnfn convert_long_sat_rtn(char);
-long __ovld __cnfn convert_long(char);
-long __ovld __cnfn convert_long_sat(char);
-long __ovld __cnfn convert_long_rte(uchar);
-long __ovld __cnfn convert_long_sat_rte(uchar);
-long __ovld __cnfn convert_long_rtz(uchar);
-long __ovld __cnfn convert_long_sat_rtz(uchar);
-long __ovld __cnfn convert_long_rtp(uchar);
-long __ovld __cnfn convert_long_sat_rtp(uchar);
-long __ovld __cnfn convert_long_rtn(uchar);
-long __ovld __cnfn convert_long_sat_rtn(uchar);
-long __ovld __cnfn convert_long(uchar);
-long __ovld __cnfn convert_long_sat(uchar);
-long __ovld __cnfn convert_long_rte(short);
-long __ovld __cnfn convert_long_sat_rte(short);
-long __ovld __cnfn convert_long_rtz(short);
-long __ovld __cnfn convert_long_sat_rtz(short);
-long __ovld __cnfn convert_long_rtp(short);
-long __ovld __cnfn convert_long_sat_rtp(short);
-long __ovld __cnfn convert_long_rtn(short);
-long __ovld __cnfn convert_long_sat_rtn(short);
-long __ovld __cnfn convert_long(short);
-long __ovld __cnfn convert_long_sat(short);
-long __ovld __cnfn convert_long_rte(ushort);
-long __ovld __cnfn convert_long_sat_rte(ushort);
-long __ovld __cnfn convert_long_rtz(ushort);
-long __ovld __cnfn convert_long_sat_rtz(ushort);
-long __ovld __cnfn convert_long_rtp(ushort);
-long __ovld __cnfn convert_long_sat_rtp(ushort);
-long __ovld __cnfn convert_long_rtn(ushort);
-long __ovld __cnfn convert_long_sat_rtn(ushort);
-long __ovld __cnfn convert_long(ushort);
-long __ovld __cnfn convert_long_sat(ushort);
-long __ovld __cnfn convert_long_rte(int);
-long __ovld __cnfn convert_long_sat_rte(int);
-long __ovld __cnfn convert_long_rtz(int);
-long __ovld __cnfn convert_long_sat_rtz(int);
-long __ovld __cnfn convert_long_rtp(int);
-long __ovld __cnfn convert_long_sat_rtp(int);
-long __ovld __cnfn convert_long_rtn(int);
-long __ovld __cnfn convert_long_sat_rtn(int);
-long __ovld __cnfn convert_long(int);
-long __ovld __cnfn convert_long_sat(int);
-long __ovld __cnfn convert_long_rte(uint);
-long __ovld __cnfn convert_long_sat_rte(uint);
-long __ovld __cnfn convert_long_rtz(uint);
-long __ovld __cnfn convert_long_sat_rtz(uint);
-long __ovld __cnfn convert_long_rtp(uint);
-long __ovld __cnfn convert_long_sat_rtp(uint);
-long __ovld __cnfn convert_long_rtn(uint);
-long __ovld __cnfn convert_long_sat_rtn(uint);
-long __ovld __cnfn convert_long(uint);
-long __ovld __cnfn convert_long_sat(uint);
-long __ovld __cnfn convert_long_rte(long);
-long __ovld __cnfn convert_long_sat_rte(long);
-long __ovld __cnfn convert_long_rtz(long);
-long __ovld __cnfn convert_long_sat_rtz(long);
-long __ovld __cnfn convert_long_rtp(long);
-long __ovld __cnfn convert_long_sat_rtp(long);
-long __ovld __cnfn convert_long_rtn(long);
-long __ovld __cnfn convert_long_sat_rtn(long);
-long __ovld __cnfn convert_long(long);
-long __ovld __cnfn convert_long_sat(long);
-long __ovld __cnfn convert_long_rte(ulong);
-long __ovld __cnfn convert_long_sat_rte(ulong);
-long __ovld __cnfn convert_long_rtz(ulong);
-long __ovld __cnfn convert_long_sat_rtz(ulong);
-long __ovld __cnfn convert_long_rtp(ulong);
-long __ovld __cnfn convert_long_sat_rtp(ulong);
-long __ovld __cnfn convert_long_rtn(ulong);
-long __ovld __cnfn convert_long_sat_rtn(ulong);
-long __ovld __cnfn convert_long(ulong);
-long __ovld __cnfn convert_long_sat(ulong);
-long __ovld __cnfn convert_long_rte(float);
-long __ovld __cnfn convert_long_sat_rte(float);
-long __ovld __cnfn convert_long_rtz(float);
-long __ovld __cnfn convert_long_sat_rtz(float);
-long __ovld __cnfn convert_long_rtp(float);
-long __ovld __cnfn convert_long_sat_rtp(float);
-long __ovld __cnfn convert_long_rtn(float);
-long __ovld __cnfn convert_long_sat_rtn(float);
-long __ovld __cnfn convert_long(float);
-long __ovld __cnfn convert_long_sat(float);
-ulong __ovld __cnfn convert_ulong_rte(char);
-ulong __ovld __cnfn convert_ulong_sat_rte(char);
-ulong __ovld __cnfn convert_ulong_rtz(char);
-ulong __ovld __cnfn convert_ulong_sat_rtz(char);
-ulong __ovld __cnfn convert_ulong_rtp(char);
-ulong __ovld __cnfn convert_ulong_sat_rtp(char);
-ulong __ovld __cnfn convert_ulong_rtn(char);
-ulong __ovld __cnfn convert_ulong_sat_rtn(char);
-ulong __ovld __cnfn convert_ulong(char);
-ulong __ovld __cnfn convert_ulong_sat(char);
-ulong __ovld __cnfn convert_ulong_rte(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
-ulong __ovld __cnfn convert_ulong_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_rtn(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
-ulong __ovld __cnfn convert_ulong(uchar);
-ulong __ovld __cnfn convert_ulong_sat(uchar);
-ulong __ovld __cnfn convert_ulong_rte(short);
-ulong __ovld __cnfn convert_ulong_sat_rte(short);
-ulong __ovld __cnfn convert_ulong_rtz(short);
-ulong __ovld __cnfn convert_ulong_sat_rtz(short);
-ulong __ovld __cnfn convert_ulong_rtp(short);
-ulong __ovld __cnfn convert_ulong_sat_rtp(short);
-ulong __ovld __cnfn convert_ulong_rtn(short);
-ulong __ovld __cnfn convert_ulong_sat_rtn(short);
-ulong __ovld __cnfn convert_ulong(short);
-ulong __ovld __cnfn convert_ulong_sat(short);
-ulong __ovld __cnfn convert_ulong_rte(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
-ulong __ovld __cnfn convert_ulong_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_rtn(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
-ulong __ovld __cnfn convert_ulong(ushort);
-ulong __ovld __cnfn convert_ulong_sat(ushort);
-ulong __ovld __cnfn convert_ulong_rte(int);
-ulong __ovld __cnfn convert_ulong_sat_rte(int);
-ulong __ovld __cnfn convert_ulong_rtz(int);
-ulong __ovld __cnfn convert_ulong_sat_rtz(int);
-ulong __ovld __cnfn convert_ulong_rtp(int);
-ulong __ovld __cnfn convert_ulong_sat_rtp(int);
-ulong __ovld __cnfn convert_ulong_rtn(int);
-ulong __ovld __cnfn convert_ulong_sat_rtn(int);
-ulong __ovld __cnfn convert_ulong(int);
-ulong __ovld __cnfn convert_ulong_sat(int);
-ulong __ovld __cnfn convert_ulong_rte(uint);
-ulong __ovld __cnfn convert_ulong_sat_rte(uint);
-ulong __ovld __cnfn convert_ulong_rtz(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
-ulong __ovld __cnfn convert_ulong_rtp(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
-ulong __ovld __cnfn convert_ulong_rtn(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
-ulong __ovld __cnfn convert_ulong(uint);
-ulong __ovld __cnfn convert_ulong_sat(uint);
-ulong __ovld __cnfn convert_ulong_rte(long);
-ulong __ovld __cnfn convert_ulong_sat_rte(long);
-ulong __ovld __cnfn convert_ulong_rtz(long);
-ulong __ovld __cnfn convert_ulong_sat_rtz(long);
-ulong __ovld __cnfn convert_ulong_rtp(long);
-ulong __ovld __cnfn convert_ulong_sat_rtp(long);
-ulong __ovld __cnfn convert_ulong_rtn(long);
-ulong __ovld __cnfn convert_ulong_sat_rtn(long);
-ulong __ovld __cnfn convert_ulong(long);
-ulong __ovld __cnfn convert_ulong_sat(long);
-ulong __ovld __cnfn convert_ulong_rte(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
-ulong __ovld __cnfn convert_ulong_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_rtn(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
-ulong __ovld __cnfn convert_ulong(ulong);
-ulong __ovld __cnfn convert_ulong_sat(ulong);
-ulong __ovld __cnfn convert_ulong_rte(float);
-ulong __ovld __cnfn convert_ulong_sat_rte(float);
-ulong __ovld __cnfn convert_ulong_rtz(float);
-ulong __ovld __cnfn convert_ulong_sat_rtz(float);
-ulong __ovld __cnfn convert_ulong_rtp(float);
-ulong __ovld __cnfn convert_ulong_sat_rtp(float);
-ulong __ovld __cnfn convert_ulong_rtn(float);
-ulong __ovld __cnfn convert_ulong_sat_rtn(float);
-ulong __ovld __cnfn convert_ulong(float);
-ulong __ovld __cnfn convert_ulong_sat(float);
-float __ovld __cnfn convert_float_rte(char);
-float __ovld __cnfn convert_float_rtz(char);
-float __ovld __cnfn convert_float_rtp(char);
-float __ovld __cnfn convert_float_rtn(char);
-float __ovld __cnfn convert_float(char);
-float __ovld __cnfn convert_float_rte(uchar);
-float __ovld __cnfn convert_float_rtz(uchar);
-float __ovld __cnfn convert_float_rtp(uchar);
-float __ovld __cnfn convert_float_rtn(uchar);
-float __ovld __cnfn convert_float(uchar);
-float __ovld __cnfn convert_float_rte(short);
-float __ovld __cnfn convert_float_rtz(short);
-float __ovld __cnfn convert_float_rtp(short);
-float __ovld __cnfn convert_float_rtn(short);
-float __ovld __cnfn convert_float(short);
-float __ovld __cnfn convert_float_rte(ushort);
-float __ovld __cnfn convert_float_rtz(ushort);
-float __ovld __cnfn convert_float_rtp(ushort);
-float __ovld __cnfn convert_float_rtn(ushort);
-float __ovld __cnfn convert_float(ushort);
-float __ovld __cnfn convert_float_rte(int);
-float __ovld __cnfn convert_float_rtz(int);
-float __ovld __cnfn convert_float_rtp(int);
-float __ovld __cnfn convert_float_rtn(int);
-float __ovld __cnfn convert_float(int);
-float __ovld __cnfn convert_float_rte(uint);
-float __ovld __cnfn convert_float_rtz(uint);
-float __ovld __cnfn convert_float_rtp(uint);
-float __ovld __cnfn convert_float_rtn(uint);
-float __ovld __cnfn convert_float(uint);
-float __ovld __cnfn convert_float_rte(long);
-float __ovld __cnfn convert_float_rtz(long);
-float __ovld __cnfn convert_float_rtp(long);
-float __ovld __cnfn convert_float_rtn(long);
-float __ovld __cnfn convert_float(long);
-float __ovld __cnfn convert_float_rte(ulong);
-float __ovld __cnfn convert_float_rtz(ulong);
-float __ovld __cnfn convert_float_rtp(ulong);
-float __ovld __cnfn convert_float_rtn(ulong);
-float __ovld __cnfn convert_float(ulong);
-float __ovld __cnfn convert_float_rte(float);
-float __ovld __cnfn convert_float_rtz(float);
-float __ovld __cnfn convert_float_rtp(float);
-float __ovld __cnfn convert_float_rtn(float);
-float __ovld __cnfn convert_float(float);
-char2 __ovld __cnfn convert_char2_rte(char2);
-char2 __ovld __cnfn convert_char2_sat_rte(char2);
-char2 __ovld __cnfn convert_char2_rtz(char2);
-char2 __ovld __cnfn convert_char2_sat_rtz(char2);
-char2 __ovld __cnfn convert_char2_rtp(char2);
-char2 __ovld __cnfn convert_char2_sat_rtp(char2);
-char2 __ovld __cnfn convert_char2_rtn(char2);
-char2 __ovld __cnfn convert_char2_sat_rtn(char2);
-char2 __ovld __cnfn convert_char2(char2);
-char2 __ovld __cnfn convert_char2_sat(char2);
-char2 __ovld __cnfn convert_char2_rte(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
-char2 __ovld __cnfn convert_char2_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_rtn(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
-char2 __ovld __cnfn convert_char2(uchar2);
-char2 __ovld __cnfn convert_char2_sat(uchar2);
-char2 __ovld __cnfn convert_char2_rte(short2);
-char2 __ovld __cnfn convert_char2_sat_rte(short2);
-char2 __ovld __cnfn convert_char2_rtz(short2);
-char2 __ovld __cnfn convert_char2_sat_rtz(short2);
-char2 __ovld __cnfn convert_char2_rtp(short2);
-char2 __ovld __cnfn convert_char2_sat_rtp(short2);
-char2 __ovld __cnfn convert_char2_rtn(short2);
-char2 __ovld __cnfn convert_char2_sat_rtn(short2);
-char2 __ovld __cnfn convert_char2(short2);
-char2 __ovld __cnfn convert_char2_sat(short2);
-char2 __ovld __cnfn convert_char2_rte(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
-char2 __ovld __cnfn convert_char2_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_rtn(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
-char2 __ovld __cnfn convert_char2(ushort2);
-char2 __ovld __cnfn convert_char2_sat(ushort2);
-char2 __ovld __cnfn convert_char2_rte(int2);
-char2 __ovld __cnfn convert_char2_sat_rte(int2);
-char2 __ovld __cnfn convert_char2_rtz(int2);
-char2 __ovld __cnfn convert_char2_sat_rtz(int2);
-char2 __ovld __cnfn convert_char2_rtp(int2);
-char2 __ovld __cnfn convert_char2_sat_rtp(int2);
-char2 __ovld __cnfn convert_char2_rtn(int2);
-char2 __ovld __cnfn convert_char2_sat_rtn(int2);
-char2 __ovld __cnfn convert_char2(int2);
-char2 __ovld __cnfn convert_char2_sat(int2);
-char2 __ovld __cnfn convert_char2_rte(uint2);
-char2 __ovld __cnfn convert_char2_sat_rte(uint2);
-char2 __ovld __cnfn convert_char2_rtz(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
-char2 __ovld __cnfn convert_char2_rtp(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
-char2 __ovld __cnfn convert_char2_rtn(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
-char2 __ovld __cnfn convert_char2(uint2);
-char2 __ovld __cnfn convert_char2_sat(uint2);
-char2 __ovld __cnfn convert_char2_rte(long2);
-char2 __ovld __cnfn convert_char2_sat_rte(long2);
-char2 __ovld __cnfn convert_char2_rtz(long2);
-char2 __ovld __cnfn convert_char2_sat_rtz(long2);
-char2 __ovld __cnfn convert_char2_rtp(long2);
-char2 __ovld __cnfn convert_char2_sat_rtp(long2);
-char2 __ovld __cnfn convert_char2_rtn(long2);
-char2 __ovld __cnfn convert_char2_sat_rtn(long2);
-char2 __ovld __cnfn convert_char2(long2);
-char2 __ovld __cnfn convert_char2_sat(long2);
-char2 __ovld __cnfn convert_char2_rte(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
-char2 __ovld __cnfn convert_char2_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_rtn(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
-char2 __ovld __cnfn convert_char2(ulong2);
-char2 __ovld __cnfn convert_char2_sat(ulong2);
-char2 __ovld __cnfn convert_char2_rte(float2);
-char2 __ovld __cnfn convert_char2_sat_rte(float2);
-char2 __ovld __cnfn convert_char2_rtz(float2);
-char2 __ovld __cnfn convert_char2_sat_rtz(float2);
-char2 __ovld __cnfn convert_char2_rtp(float2);
-char2 __ovld __cnfn convert_char2_sat_rtp(float2);
-char2 __ovld __cnfn convert_char2_rtn(float2);
-char2 __ovld __cnfn convert_char2_sat_rtn(float2);
-char2 __ovld __cnfn convert_char2(float2);
-char2 __ovld __cnfn convert_char2_sat(float2);
-uchar2 __ovld __cnfn convert_uchar2_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat(char2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat(short2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat(int2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat(long2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat(float2);
-short2 __ovld __cnfn convert_short2_rte(char2);
-short2 __ovld __cnfn convert_short2_sat_rte(char2);
-short2 __ovld __cnfn convert_short2_rtz(char2);
-short2 __ovld __cnfn convert_short2_sat_rtz(char2);
-short2 __ovld __cnfn convert_short2_rtp(char2);
-short2 __ovld __cnfn convert_short2_sat_rtp(char2);
-short2 __ovld __cnfn convert_short2_rtn(char2);
-short2 __ovld __cnfn convert_short2_sat_rtn(char2);
-short2 __ovld __cnfn convert_short2(char2);
-short2 __ovld __cnfn convert_short2_sat(char2);
-short2 __ovld __cnfn convert_short2_rte(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
-short2 __ovld __cnfn convert_short2_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_rtn(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
-short2 __ovld __cnfn convert_short2(uchar2);
-short2 __ovld __cnfn convert_short2_sat(uchar2);
-short2 __ovld __cnfn convert_short2_rte(short2);
-short2 __ovld __cnfn convert_short2_sat_rte(short2);
-short2 __ovld __cnfn convert_short2_rtz(short2);
-short2 __ovld __cnfn convert_short2_sat_rtz(short2);
-short2 __ovld __cnfn convert_short2_rtp(short2);
-short2 __ovld __cnfn convert_short2_sat_rtp(short2);
-short2 __ovld __cnfn convert_short2_rtn(short2);
-short2 __ovld __cnfn convert_short2_sat_rtn(short2);
-short2 __ovld __cnfn convert_short2(short2);
-short2 __ovld __cnfn convert_short2_sat(short2);
-short2 __ovld __cnfn convert_short2_rte(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
-short2 __ovld __cnfn convert_short2_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_rtn(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
-short2 __ovld __cnfn convert_short2(ushort2);
-short2 __ovld __cnfn convert_short2_sat(ushort2);
-short2 __ovld __cnfn convert_short2_rte(int2);
-short2 __ovld __cnfn convert_short2_sat_rte(int2);
-short2 __ovld __cnfn convert_short2_rtz(int2);
-short2 __ovld __cnfn convert_short2_sat_rtz(int2);
-short2 __ovld __cnfn convert_short2_rtp(int2);
-short2 __ovld __cnfn convert_short2_sat_rtp(int2);
-short2 __ovld __cnfn convert_short2_rtn(int2);
-short2 __ovld __cnfn convert_short2_sat_rtn(int2);
-short2 __ovld __cnfn convert_short2(int2);
-short2 __ovld __cnfn convert_short2_sat(int2);
-short2 __ovld __cnfn convert_short2_rte(uint2);
-short2 __ovld __cnfn convert_short2_sat_rte(uint2);
-short2 __ovld __cnfn convert_short2_rtz(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
-short2 __ovld __cnfn convert_short2_rtp(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
-short2 __ovld __cnfn convert_short2_rtn(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
-short2 __ovld __cnfn convert_short2(uint2);
-short2 __ovld __cnfn convert_short2_sat(uint2);
-short2 __ovld __cnfn convert_short2_rte(long2);
-short2 __ovld __cnfn convert_short2_sat_rte(long2);
-short2 __ovld __cnfn convert_short2_rtz(long2);
-short2 __ovld __cnfn convert_short2_sat_rtz(long2);
-short2 __ovld __cnfn convert_short2_rtp(long2);
-short2 __ovld __cnfn convert_short2_sat_rtp(long2);
-short2 __ovld __cnfn convert_short2_rtn(long2);
-short2 __ovld __cnfn convert_short2_sat_rtn(long2);
-short2 __ovld __cnfn convert_short2(long2);
-short2 __ovld __cnfn convert_short2_sat(long2);
-short2 __ovld __cnfn convert_short2_rte(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
-short2 __ovld __cnfn convert_short2_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_rtn(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
-short2 __ovld __cnfn convert_short2(ulong2);
-short2 __ovld __cnfn convert_short2_sat(ulong2);
-short2 __ovld __cnfn convert_short2_rte(float2);
-short2 __ovld __cnfn convert_short2_sat_rte(float2);
-short2 __ovld __cnfn convert_short2_rtz(float2);
-short2 __ovld __cnfn convert_short2_sat_rtz(float2);
-short2 __ovld __cnfn convert_short2_rtp(float2);
-short2 __ovld __cnfn convert_short2_sat_rtp(float2);
-short2 __ovld __cnfn convert_short2_rtn(float2);
-short2 __ovld __cnfn convert_short2_sat_rtn(float2);
-short2 __ovld __cnfn convert_short2(float2);
-short2 __ovld __cnfn convert_short2_sat(float2);
-ushort2 __ovld __cnfn convert_ushort2_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat(char2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat(short2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat(int2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat(long2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat(float2);
-int2 __ovld __cnfn convert_int2_rte(char2);
-int2 __ovld __cnfn convert_int2_sat_rte(char2);
-int2 __ovld __cnfn convert_int2_rtz(char2);
-int2 __ovld __cnfn convert_int2_sat_rtz(char2);
-int2 __ovld __cnfn convert_int2_rtp(char2);
-int2 __ovld __cnfn convert_int2_sat_rtp(char2);
-int2 __ovld __cnfn convert_int2_rtn(char2);
-int2 __ovld __cnfn convert_int2_sat_rtn(char2);
-int2 __ovld __cnfn convert_int2(char2);
-int2 __ovld __cnfn convert_int2_sat(char2);
-int2 __ovld __cnfn convert_int2_rte(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
-int2 __ovld __cnfn convert_int2_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_rtn(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
-int2 __ovld __cnfn convert_int2(uchar2);
-int2 __ovld __cnfn convert_int2_sat(uchar2);
-int2 __ovld __cnfn convert_int2_rte(short2);
-int2 __ovld __cnfn convert_int2_sat_rte(short2);
-int2 __ovld __cnfn convert_int2_rtz(short2);
-int2 __ovld __cnfn convert_int2_sat_rtz(short2);
-int2 __ovld __cnfn convert_int2_rtp(short2);
-int2 __ovld __cnfn convert_int2_sat_rtp(short2);
-int2 __ovld __cnfn convert_int2_rtn(short2);
-int2 __ovld __cnfn convert_int2_sat_rtn(short2);
-int2 __ovld __cnfn convert_int2(short2);
-int2 __ovld __cnfn convert_int2_sat(short2);
-int2 __ovld __cnfn convert_int2_rte(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
-int2 __ovld __cnfn convert_int2_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_rtn(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
-int2 __ovld __cnfn convert_int2(ushort2);
-int2 __ovld __cnfn convert_int2_sat(ushort2);
-int2 __ovld __cnfn convert_int2_rte(int2);
-int2 __ovld __cnfn convert_int2_sat_rte(int2);
-int2 __ovld __cnfn convert_int2_rtz(int2);
-int2 __ovld __cnfn convert_int2_sat_rtz(int2);
-int2 __ovld __cnfn convert_int2_rtp(int2);
-int2 __ovld __cnfn convert_int2_sat_rtp(int2);
-int2 __ovld __cnfn convert_int2_rtn(int2);
-int2 __ovld __cnfn convert_int2_sat_rtn(int2);
-int2 __ovld __cnfn convert_int2(int2);
-int2 __ovld __cnfn convert_int2_sat(int2);
-int2 __ovld __cnfn convert_int2_rte(uint2);
-int2 __ovld __cnfn convert_int2_sat_rte(uint2);
-int2 __ovld __cnfn convert_int2_rtz(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
-int2 __ovld __cnfn convert_int2_rtp(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
-int2 __ovld __cnfn convert_int2_rtn(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
-int2 __ovld __cnfn convert_int2(uint2);
-int2 __ovld __cnfn convert_int2_sat(uint2);
-int2 __ovld __cnfn convert_int2_rte(long2);
-int2 __ovld __cnfn convert_int2_sat_rte(long2);
-int2 __ovld __cnfn convert_int2_rtz(long2);
-int2 __ovld __cnfn convert_int2_sat_rtz(long2);
-int2 __ovld __cnfn convert_int2_rtp(long2);
-int2 __ovld __cnfn convert_int2_sat_rtp(long2);
-int2 __ovld __cnfn convert_int2_rtn(long2);
-int2 __ovld __cnfn convert_int2_sat_rtn(long2);
-int2 __ovld __cnfn convert_int2(long2);
-int2 __ovld __cnfn convert_int2_sat(long2);
-int2 __ovld __cnfn convert_int2_rte(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
-int2 __ovld __cnfn convert_int2_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_rtn(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
-int2 __ovld __cnfn convert_int2(ulong2);
-int2 __ovld __cnfn convert_int2_sat(ulong2);
-int2 __ovld __cnfn convert_int2_rte(float2);
-int2 __ovld __cnfn convert_int2_sat_rte(float2);
-int2 __ovld __cnfn convert_int2_rtz(float2);
-int2 __ovld __cnfn convert_int2_sat_rtz(float2);
-int2 __ovld __cnfn convert_int2_rtp(float2);
-int2 __ovld __cnfn convert_int2_sat_rtp(float2);
-int2 __ovld __cnfn convert_int2_rtn(float2);
-int2 __ovld __cnfn convert_int2_sat_rtn(float2);
-int2 __ovld __cnfn convert_int2(float2);
-int2 __ovld __cnfn convert_int2_sat(float2);
-uint2 __ovld __cnfn convert_uint2_rte(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
-uint2 __ovld __cnfn convert_uint2_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_rtn(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
-uint2 __ovld __cnfn convert_uint2(char2);
-uint2 __ovld __cnfn convert_uint2_sat(char2);
-uint2 __ovld __cnfn convert_uint2_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat(uchar2);
-uint2 __ovld __cnfn convert_uint2_rte(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
-uint2 __ovld __cnfn convert_uint2_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_rtn(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
-uint2 __ovld __cnfn convert_uint2(short2);
-uint2 __ovld __cnfn convert_uint2_sat(short2);
-uint2 __ovld __cnfn convert_uint2_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat(ushort2);
-uint2 __ovld __cnfn convert_uint2_rte(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
-uint2 __ovld __cnfn convert_uint2_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_rtn(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
-uint2 __ovld __cnfn convert_uint2(int2);
-uint2 __ovld __cnfn convert_uint2_sat(int2);
-uint2 __ovld __cnfn convert_uint2_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2(uint2);
-uint2 __ovld __cnfn convert_uint2_sat(uint2);
-uint2 __ovld __cnfn convert_uint2_rte(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
-uint2 __ovld __cnfn convert_uint2_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_rtn(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
-uint2 __ovld __cnfn convert_uint2(long2);
-uint2 __ovld __cnfn convert_uint2_sat(long2);
-uint2 __ovld __cnfn convert_uint2_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat(ulong2);
-uint2 __ovld __cnfn convert_uint2_rte(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
-uint2 __ovld __cnfn convert_uint2_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_rtn(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
-uint2 __ovld __cnfn convert_uint2(float2);
-uint2 __ovld __cnfn convert_uint2_sat(float2);
-long2 __ovld __cnfn convert_long2_rte(char2);
-long2 __ovld __cnfn convert_long2_sat_rte(char2);
-long2 __ovld __cnfn convert_long2_rtz(char2);
-long2 __ovld __cnfn convert_long2_sat_rtz(char2);
-long2 __ovld __cnfn convert_long2_rtp(char2);
-long2 __ovld __cnfn convert_long2_sat_rtp(char2);
-long2 __ovld __cnfn convert_long2_rtn(char2);
-long2 __ovld __cnfn convert_long2_sat_rtn(char2);
-long2 __ovld __cnfn convert_long2(char2);
-long2 __ovld __cnfn convert_long2_sat(char2);
-long2 __ovld __cnfn convert_long2_rte(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
-long2 __ovld __cnfn convert_long2_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_rtn(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
-long2 __ovld __cnfn convert_long2(uchar2);
-long2 __ovld __cnfn convert_long2_sat(uchar2);
-long2 __ovld __cnfn convert_long2_rte(short2);
-long2 __ovld __cnfn convert_long2_sat_rte(short2);
-long2 __ovld __cnfn convert_long2_rtz(short2);
-long2 __ovld __cnfn convert_long2_sat_rtz(short2);
-long2 __ovld __cnfn convert_long2_rtp(short2);
-long2 __ovld __cnfn convert_long2_sat_rtp(short2);
-long2 __ovld __cnfn convert_long2_rtn(short2);
-long2 __ovld __cnfn convert_long2_sat_rtn(short2);
-long2 __ovld __cnfn convert_long2(short2);
-long2 __ovld __cnfn convert_long2_sat(short2);
-long2 __ovld __cnfn convert_long2_rte(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
-long2 __ovld __cnfn convert_long2_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_rtn(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
-long2 __ovld __cnfn convert_long2(ushort2);
-long2 __ovld __cnfn convert_long2_sat(ushort2);
-long2 __ovld __cnfn convert_long2_rte(int2);
-long2 __ovld __cnfn convert_long2_sat_rte(int2);
-long2 __ovld __cnfn convert_long2_rtz(int2);
-long2 __ovld __cnfn convert_long2_sat_rtz(int2);
-long2 __ovld __cnfn convert_long2_rtp(int2);
-long2 __ovld __cnfn convert_long2_sat_rtp(int2);
-long2 __ovld __cnfn convert_long2_rtn(int2);
-long2 __ovld __cnfn convert_long2_sat_rtn(int2);
-long2 __ovld __cnfn convert_long2(int2);
-long2 __ovld __cnfn convert_long2_sat(int2);
-long2 __ovld __cnfn convert_long2_rte(uint2);
-long2 __ovld __cnfn convert_long2_sat_rte(uint2);
-long2 __ovld __cnfn convert_long2_rtz(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
-long2 __ovld __cnfn convert_long2_rtp(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
-long2 __ovld __cnfn convert_long2_rtn(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
-long2 __ovld __cnfn convert_long2(uint2);
-long2 __ovld __cnfn convert_long2_sat(uint2);
-long2 __ovld __cnfn convert_long2_rte(long2);
-long2 __ovld __cnfn convert_long2_sat_rte(long2);
-long2 __ovld __cnfn convert_long2_rtz(long2);
-long2 __ovld __cnfn convert_long2_sat_rtz(long2);
-long2 __ovld __cnfn convert_long2_rtp(long2);
-long2 __ovld __cnfn convert_long2_sat_rtp(long2);
-long2 __ovld __cnfn convert_long2_rtn(long2);
-long2 __ovld __cnfn convert_long2_sat_rtn(long2);
-long2 __ovld __cnfn convert_long2(long2);
-long2 __ovld __cnfn convert_long2_sat(long2);
-long2 __ovld __cnfn convert_long2_rte(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
-long2 __ovld __cnfn convert_long2_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_rtn(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
-long2 __ovld __cnfn convert_long2(ulong2);
-long2 __ovld __cnfn convert_long2_sat(ulong2);
-long2 __ovld __cnfn convert_long2_rte(float2);
-long2 __ovld __cnfn convert_long2_sat_rte(float2);
-long2 __ovld __cnfn convert_long2_rtz(float2);
-long2 __ovld __cnfn convert_long2_sat_rtz(float2);
-long2 __ovld __cnfn convert_long2_rtp(float2);
-long2 __ovld __cnfn convert_long2_sat_rtp(float2);
-long2 __ovld __cnfn convert_long2_rtn(float2);
-long2 __ovld __cnfn convert_long2_sat_rtn(float2);
-long2 __ovld __cnfn convert_long2(float2);
-long2 __ovld __cnfn convert_long2_sat(float2);
-ulong2 __ovld __cnfn convert_ulong2_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat(char2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat(short2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat(int2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat(long2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat(float2);
-float2 __ovld __cnfn convert_float2_rte(char2);
-float2 __ovld __cnfn convert_float2_rtz(char2);
-float2 __ovld __cnfn convert_float2_rtp(char2);
-float2 __ovld __cnfn convert_float2_rtn(char2);
-float2 __ovld __cnfn convert_float2(char2);
-float2 __ovld __cnfn convert_float2_rte(uchar2);
-float2 __ovld __cnfn convert_float2_rtz(uchar2);
-float2 __ovld __cnfn convert_float2_rtp(uchar2);
-float2 __ovld __cnfn convert_float2_rtn(uchar2);
-float2 __ovld __cnfn convert_float2(uchar2);
-float2 __ovld __cnfn convert_float2_rte(short2);
-float2 __ovld __cnfn convert_float2_rtz(short2);
-float2 __ovld __cnfn convert_float2_rtp(short2);
-float2 __ovld __cnfn convert_float2_rtn(short2);
-float2 __ovld __cnfn convert_float2(short2);
-float2 __ovld __cnfn convert_float2_rte(ushort2);
-float2 __ovld __cnfn convert_float2_rtz(ushort2);
-float2 __ovld __cnfn convert_float2_rtp(ushort2);
-float2 __ovld __cnfn convert_float2_rtn(ushort2);
-float2 __ovld __cnfn convert_float2(ushort2);
-float2 __ovld __cnfn convert_float2_rte(int2);
-float2 __ovld __cnfn convert_float2_rtz(int2);
-float2 __ovld __cnfn convert_float2_rtp(int2);
-float2 __ovld __cnfn convert_float2_rtn(int2);
-float2 __ovld __cnfn convert_float2(int2);
-float2 __ovld __cnfn convert_float2_rte(uint2);
-float2 __ovld __cnfn convert_float2_rtz(uint2);
-float2 __ovld __cnfn convert_float2_rtp(uint2);
-float2 __ovld __cnfn convert_float2_rtn(uint2);
-float2 __ovld __cnfn convert_float2(uint2);
-float2 __ovld __cnfn convert_float2_rte(long2);
-float2 __ovld __cnfn convert_float2_rtz(long2);
-float2 __ovld __cnfn convert_float2_rtp(long2);
-float2 __ovld __cnfn convert_float2_rtn(long2);
-float2 __ovld __cnfn convert_float2(long2);
-float2 __ovld __cnfn convert_float2_rte(ulong2);
-float2 __ovld __cnfn convert_float2_rtz(ulong2);
-float2 __ovld __cnfn convert_float2_rtp(ulong2);
-float2 __ovld __cnfn convert_float2_rtn(ulong2);
-float2 __ovld __cnfn convert_float2(ulong2);
-float2 __ovld __cnfn convert_float2_rte(float2);
-float2 __ovld __cnfn convert_float2_rtz(float2);
-float2 __ovld __cnfn convert_float2_rtp(float2);
-float2 __ovld __cnfn convert_float2_rtn(float2);
-float2 __ovld __cnfn convert_float2(float2);
-char3 __ovld __cnfn convert_char3_rte(char3);
-char3 __ovld __cnfn convert_char3_sat_rte(char3);
-char3 __ovld __cnfn convert_char3_rtz(char3);
-char3 __ovld __cnfn convert_char3_sat_rtz(char3);
-char3 __ovld __cnfn convert_char3_rtp(char3);
-char3 __ovld __cnfn convert_char3_sat_rtp(char3);
-char3 __ovld __cnfn convert_char3_rtn(char3);
-char3 __ovld __cnfn convert_char3_sat_rtn(char3);
-char3 __ovld __cnfn convert_char3(char3);
-char3 __ovld __cnfn convert_char3_sat(char3);
-char3 __ovld __cnfn convert_char3_rte(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
-char3 __ovld __cnfn convert_char3_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_rtn(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
-char3 __ovld __cnfn convert_char3(uchar3);
-char3 __ovld __cnfn convert_char3_sat(uchar3);
-char3 __ovld __cnfn convert_char3_rte(short3);
-char3 __ovld __cnfn convert_char3_sat_rte(short3);
-char3 __ovld __cnfn convert_char3_rtz(short3);
-char3 __ovld __cnfn convert_char3_sat_rtz(short3);
-char3 __ovld __cnfn convert_char3_rtp(short3);
-char3 __ovld __cnfn convert_char3_sat_rtp(short3);
-char3 __ovld __cnfn convert_char3_rtn(short3);
-char3 __ovld __cnfn convert_char3_sat_rtn(short3);
-char3 __ovld __cnfn convert_char3(short3);
-char3 __ovld __cnfn convert_char3_sat(short3);
-char3 __ovld __cnfn convert_char3_rte(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
-char3 __ovld __cnfn convert_char3_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_rtn(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
-char3 __ovld __cnfn convert_char3(ushort3);
-char3 __ovld __cnfn convert_char3_sat(ushort3);
-char3 __ovld __cnfn convert_char3_rte(int3);
-char3 __ovld __cnfn convert_char3_sat_rte(int3);
-char3 __ovld __cnfn convert_char3_rtz(int3);
-char3 __ovld __cnfn convert_char3_sat_rtz(int3);
-char3 __ovld __cnfn convert_char3_rtp(int3);
-char3 __ovld __cnfn convert_char3_sat_rtp(int3);
-char3 __ovld __cnfn convert_char3_rtn(int3);
-char3 __ovld __cnfn convert_char3_sat_rtn(int3);
-char3 __ovld __cnfn convert_char3(int3);
-char3 __ovld __cnfn convert_char3_sat(int3);
-char3 __ovld __cnfn convert_char3_rte(uint3);
-char3 __ovld __cnfn convert_char3_sat_rte(uint3);
-char3 __ovld __cnfn convert_char3_rtz(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
-char3 __ovld __cnfn convert_char3_rtp(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
-char3 __ovld __cnfn convert_char3_rtn(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
-char3 __ovld __cnfn convert_char3(uint3);
-char3 __ovld __cnfn convert_char3_sat(uint3);
-char3 __ovld __cnfn convert_char3_rte(long3);
-char3 __ovld __cnfn convert_char3_sat_rte(long3);
-char3 __ovld __cnfn convert_char3_rtz(long3);
-char3 __ovld __cnfn convert_char3_sat_rtz(long3);
-char3 __ovld __cnfn convert_char3_rtp(long3);
-char3 __ovld __cnfn convert_char3_sat_rtp(long3);
-char3 __ovld __cnfn convert_char3_rtn(long3);
-char3 __ovld __cnfn convert_char3_sat_rtn(long3);
-char3 __ovld __cnfn convert_char3(long3);
-char3 __ovld __cnfn convert_char3_sat(long3);
-char3 __ovld __cnfn convert_char3_rte(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
-char3 __ovld __cnfn convert_char3_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_rtn(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
-char3 __ovld __cnfn convert_char3(ulong3);
-char3 __ovld __cnfn convert_char3_sat(ulong3);
-char3 __ovld __cnfn convert_char3_rte(float3);
-char3 __ovld __cnfn convert_char3_sat_rte(float3);
-char3 __ovld __cnfn convert_char3_rtz(float3);
-char3 __ovld __cnfn convert_char3_sat_rtz(float3);
-char3 __ovld __cnfn convert_char3_rtp(float3);
-char3 __ovld __cnfn convert_char3_sat_rtp(float3);
-char3 __ovld __cnfn convert_char3_rtn(float3);
-char3 __ovld __cnfn convert_char3_sat_rtn(float3);
-char3 __ovld __cnfn convert_char3(float3);
-char3 __ovld __cnfn convert_char3_sat(float3);
-uchar3 __ovld __cnfn convert_uchar3_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat(char3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat(short3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat(int3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat(long3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat(float3);
-short3 __ovld __cnfn convert_short3_rte(char3);
-short3 __ovld __cnfn convert_short3_sat_rte(char3);
-short3 __ovld __cnfn convert_short3_rtz(char3);
-short3 __ovld __cnfn convert_short3_sat_rtz(char3);
-short3 __ovld __cnfn convert_short3_rtp(char3);
-short3 __ovld __cnfn convert_short3_sat_rtp(char3);
-short3 __ovld __cnfn convert_short3_rtn(char3);
-short3 __ovld __cnfn convert_short3_sat_rtn(char3);
-short3 __ovld __cnfn convert_short3(char3);
-short3 __ovld __cnfn convert_short3_sat(char3);
-short3 __ovld __cnfn convert_short3_rte(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
-short3 __ovld __cnfn convert_short3_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_rtn(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
-short3 __ovld __cnfn convert_short3(uchar3);
-short3 __ovld __cnfn convert_short3_sat(uchar3);
-short3 __ovld __cnfn convert_short3_rte(short3);
-short3 __ovld __cnfn convert_short3_sat_rte(short3);
-short3 __ovld __cnfn convert_short3_rtz(short3);
-short3 __ovld __cnfn convert_short3_sat_rtz(short3);
-short3 __ovld __cnfn convert_short3_rtp(short3);
-short3 __ovld __cnfn convert_short3_sat_rtp(short3);
-short3 __ovld __cnfn convert_short3_rtn(short3);
-short3 __ovld __cnfn convert_short3_sat_rtn(short3);
-short3 __ovld __cnfn convert_short3(short3);
-short3 __ovld __cnfn convert_short3_sat(short3);
-short3 __ovld __cnfn convert_short3_rte(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
-short3 __ovld __cnfn convert_short3_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_rtn(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
-short3 __ovld __cnfn convert_short3(ushort3);
-short3 __ovld __cnfn convert_short3_sat(ushort3);
-short3 __ovld __cnfn convert_short3_rte(int3);
-short3 __ovld __cnfn convert_short3_sat_rte(int3);
-short3 __ovld __cnfn convert_short3_rtz(int3);
-short3 __ovld __cnfn convert_short3_sat_rtz(int3);
-short3 __ovld __cnfn convert_short3_rtp(int3);
-short3 __ovld __cnfn convert_short3_sat_rtp(int3);
-short3 __ovld __cnfn convert_short3_rtn(int3);
-short3 __ovld __cnfn convert_short3_sat_rtn(int3);
-short3 __ovld __cnfn convert_short3(int3);
-short3 __ovld __cnfn convert_short3_sat(int3);
-short3 __ovld __cnfn convert_short3_rte(uint3);
-short3 __ovld __cnfn convert_short3_sat_rte(uint3);
-short3 __ovld __cnfn convert_short3_rtz(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
-short3 __ovld __cnfn convert_short3_rtp(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
-short3 __ovld __cnfn convert_short3_rtn(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
-short3 __ovld __cnfn convert_short3(uint3);
-short3 __ovld __cnfn convert_short3_sat(uint3);
-short3 __ovld __cnfn convert_short3_rte(long3);
-short3 __ovld __cnfn convert_short3_sat_rte(long3);
-short3 __ovld __cnfn convert_short3_rtz(long3);
-short3 __ovld __cnfn convert_short3_sat_rtz(long3);
-short3 __ovld __cnfn convert_short3_rtp(long3);
-short3 __ovld __cnfn convert_short3_sat_rtp(long3);
-short3 __ovld __cnfn convert_short3_rtn(long3);
-short3 __ovld __cnfn convert_short3_sat_rtn(long3);
-short3 __ovld __cnfn convert_short3(long3);
-short3 __ovld __cnfn convert_short3_sat(long3);
-short3 __ovld __cnfn convert_short3_rte(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
-short3 __ovld __cnfn convert_short3_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_rtn(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
-short3 __ovld __cnfn convert_short3(ulong3);
-short3 __ovld __cnfn convert_short3_sat(ulong3);
-short3 __ovld __cnfn convert_short3_rte(float3);
-short3 __ovld __cnfn convert_short3_sat_rte(float3);
-short3 __ovld __cnfn convert_short3_rtz(float3);
-short3 __ovld __cnfn convert_short3_sat_rtz(float3);
-short3 __ovld __cnfn convert_short3_rtp(float3);
-short3 __ovld __cnfn convert_short3_sat_rtp(float3);
-short3 __ovld __cnfn convert_short3_rtn(float3);
-short3 __ovld __cnfn convert_short3_sat_rtn(float3);
-short3 __ovld __cnfn convert_short3(float3);
-short3 __ovld __cnfn convert_short3_sat(float3);
-ushort3 __ovld __cnfn convert_ushort3_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat(char3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat(short3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat(int3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat(long3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat(float3);
-int3 __ovld __cnfn convert_int3_rte(char3);
-int3 __ovld __cnfn convert_int3_sat_rte(char3);
-int3 __ovld __cnfn convert_int3_rtz(char3);
-int3 __ovld __cnfn convert_int3_sat_rtz(char3);
-int3 __ovld __cnfn convert_int3_rtp(char3);
-int3 __ovld __cnfn convert_int3_sat_rtp(char3);
-int3 __ovld __cnfn convert_int3_rtn(char3);
-int3 __ovld __cnfn convert_int3_sat_rtn(char3);
-int3 __ovld __cnfn convert_int3(char3);
-int3 __ovld __cnfn convert_int3_sat(char3);
-int3 __ovld __cnfn convert_int3_rte(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
-int3 __ovld __cnfn convert_int3_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_rtn(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
-int3 __ovld __cnfn convert_int3(uchar3);
-int3 __ovld __cnfn convert_int3_sat(uchar3);
-int3 __ovld __cnfn convert_int3_rte(short3);
-int3 __ovld __cnfn convert_int3_sat_rte(short3);
-int3 __ovld __cnfn convert_int3_rtz(short3);
-int3 __ovld __cnfn convert_int3_sat_rtz(short3);
-int3 __ovld __cnfn convert_int3_rtp(short3);
-int3 __ovld __cnfn convert_int3_sat_rtp(short3);
-int3 __ovld __cnfn convert_int3_rtn(short3);
-int3 __ovld __cnfn convert_int3_sat_rtn(short3);
-int3 __ovld __cnfn convert_int3(short3);
-int3 __ovld __cnfn convert_int3_sat(short3);
-int3 __ovld __cnfn convert_int3_rte(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
-int3 __ovld __cnfn convert_int3_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_rtn(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
-int3 __ovld __cnfn convert_int3(ushort3);
-int3 __ovld __cnfn convert_int3_sat(ushort3);
-int3 __ovld __cnfn convert_int3_rte(int3);
-int3 __ovld __cnfn convert_int3_sat_rte(int3);
-int3 __ovld __cnfn convert_int3_rtz(int3);
-int3 __ovld __cnfn convert_int3_sat_rtz(int3);
-int3 __ovld __cnfn convert_int3_rtp(int3);
-int3 __ovld __cnfn convert_int3_sat_rtp(int3);
-int3 __ovld __cnfn convert_int3_rtn(int3);
-int3 __ovld __cnfn convert_int3_sat_rtn(int3);
-int3 __ovld __cnfn convert_int3(int3);
-int3 __ovld __cnfn convert_int3_sat(int3);
-int3 __ovld __cnfn convert_int3_rte(uint3);
-int3 __ovld __cnfn convert_int3_sat_rte(uint3);
-int3 __ovld __cnfn convert_int3_rtz(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
-int3 __ovld __cnfn convert_int3_rtp(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
-int3 __ovld __cnfn convert_int3_rtn(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
-int3 __ovld __cnfn convert_int3(uint3);
-int3 __ovld __cnfn convert_int3_sat(uint3);
-int3 __ovld __cnfn convert_int3_rte(long3);
-int3 __ovld __cnfn convert_int3_sat_rte(long3);
-int3 __ovld __cnfn convert_int3_rtz(long3);
-int3 __ovld __cnfn convert_int3_sat_rtz(long3);
-int3 __ovld __cnfn convert_int3_rtp(long3);
-int3 __ovld __cnfn convert_int3_sat_rtp(long3);
-int3 __ovld __cnfn convert_int3_rtn(long3);
-int3 __ovld __cnfn convert_int3_sat_rtn(long3);
-int3 __ovld __cnfn convert_int3(long3);
-int3 __ovld __cnfn convert_int3_sat(long3);
-int3 __ovld __cnfn convert_int3_rte(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
-int3 __ovld __cnfn convert_int3_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_rtn(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
-int3 __ovld __cnfn convert_int3(ulong3);
-int3 __ovld __cnfn convert_int3_sat(ulong3);
-int3 __ovld __cnfn convert_int3_rte(float3);
-int3 __ovld __cnfn convert_int3_sat_rte(float3);
-int3 __ovld __cnfn convert_int3_rtz(float3);
-int3 __ovld __cnfn convert_int3_sat_rtz(float3);
-int3 __ovld __cnfn convert_int3_rtp(float3);
-int3 __ovld __cnfn convert_int3_sat_rtp(float3);
-int3 __ovld __cnfn convert_int3_rtn(float3);
-int3 __ovld __cnfn convert_int3_sat_rtn(float3);
-int3 __ovld __cnfn convert_int3(float3);
-int3 __ovld __cnfn convert_int3_sat(float3);
-uint3 __ovld __cnfn convert_uint3_rte(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
-uint3 __ovld __cnfn convert_uint3_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_rtn(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
-uint3 __ovld __cnfn convert_uint3(char3);
-uint3 __ovld __cnfn convert_uint3_sat(char3);
-uint3 __ovld __cnfn convert_uint3_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat(uchar3);
-uint3 __ovld __cnfn convert_uint3_rte(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
-uint3 __ovld __cnfn convert_uint3_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_rtn(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
-uint3 __ovld __cnfn convert_uint3(short3);
-uint3 __ovld __cnfn convert_uint3_sat(short3);
-uint3 __ovld __cnfn convert_uint3_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat(ushort3);
-uint3 __ovld __cnfn convert_uint3_rte(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
-uint3 __ovld __cnfn convert_uint3_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_rtn(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
-uint3 __ovld __cnfn convert_uint3(int3);
-uint3 __ovld __cnfn convert_uint3_sat(int3);
-uint3 __ovld __cnfn convert_uint3_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3(uint3);
-uint3 __ovld __cnfn convert_uint3_sat(uint3);
-uint3 __ovld __cnfn convert_uint3_rte(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
-uint3 __ovld __cnfn convert_uint3_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_rtn(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
-uint3 __ovld __cnfn convert_uint3(long3);
-uint3 __ovld __cnfn convert_uint3_sat(long3);
-uint3 __ovld __cnfn convert_uint3_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat(ulong3);
-uint3 __ovld __cnfn convert_uint3_rte(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
-uint3 __ovld __cnfn convert_uint3_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_rtn(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
-uint3 __ovld __cnfn convert_uint3(float3);
-uint3 __ovld __cnfn convert_uint3_sat(float3);
-long3 __ovld __cnfn convert_long3_rte(char3);
-long3 __ovld __cnfn convert_long3_sat_rte(char3);
-long3 __ovld __cnfn convert_long3_rtz(char3);
-long3 __ovld __cnfn convert_long3_sat_rtz(char3);
-long3 __ovld __cnfn convert_long3_rtp(char3);
-long3 __ovld __cnfn convert_long3_sat_rtp(char3);
-long3 __ovld __cnfn convert_long3_rtn(char3);
-long3 __ovld __cnfn convert_long3_sat_rtn(char3);
-long3 __ovld __cnfn convert_long3(char3);
-long3 __ovld __cnfn convert_long3_sat(char3);
-long3 __ovld __cnfn convert_long3_rte(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
-long3 __ovld __cnfn convert_long3_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_rtn(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
-long3 __ovld __cnfn convert_long3(uchar3);
-long3 __ovld __cnfn convert_long3_sat(uchar3);
-long3 __ovld __cnfn convert_long3_rte(short3);
-long3 __ovld __cnfn convert_long3_sat_rte(short3);
-long3 __ovld __cnfn convert_long3_rtz(short3);
-long3 __ovld __cnfn convert_long3_sat_rtz(short3);
-long3 __ovld __cnfn convert_long3_rtp(short3);
-long3 __ovld __cnfn convert_long3_sat_rtp(short3);
-long3 __ovld __cnfn convert_long3_rtn(short3);
-long3 __ovld __cnfn convert_long3_sat_rtn(short3);
-long3 __ovld __cnfn convert_long3(short3);
-long3 __ovld __cnfn convert_long3_sat(short3);
-long3 __ovld __cnfn convert_long3_rte(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
-long3 __ovld __cnfn convert_long3_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_rtn(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
-long3 __ovld __cnfn convert_long3(ushort3);
-long3 __ovld __cnfn convert_long3_sat(ushort3);
-long3 __ovld __cnfn convert_long3_rte(int3);
-long3 __ovld __cnfn convert_long3_sat_rte(int3);
-long3 __ovld __cnfn convert_long3_rtz(int3);
-long3 __ovld __cnfn convert_long3_sat_rtz(int3);
-long3 __ovld __cnfn convert_long3_rtp(int3);
-long3 __ovld __cnfn convert_long3_sat_rtp(int3);
-long3 __ovld __cnfn convert_long3_rtn(int3);
-long3 __ovld __cnfn convert_long3_sat_rtn(int3);
-long3 __ovld __cnfn convert_long3(int3);
-long3 __ovld __cnfn convert_long3_sat(int3);
-long3 __ovld __cnfn convert_long3_rte(uint3);
-long3 __ovld __cnfn convert_long3_sat_rte(uint3);
-long3 __ovld __cnfn convert_long3_rtz(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
-long3 __ovld __cnfn convert_long3_rtp(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
-long3 __ovld __cnfn convert_long3_rtn(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
-long3 __ovld __cnfn convert_long3(uint3);
-long3 __ovld __cnfn convert_long3_sat(uint3);
-long3 __ovld __cnfn convert_long3_rte(long3);
-long3 __ovld __cnfn convert_long3_sat_rte(long3);
-long3 __ovld __cnfn convert_long3_rtz(long3);
-long3 __ovld __cnfn convert_long3_sat_rtz(long3);
-long3 __ovld __cnfn convert_long3_rtp(long3);
-long3 __ovld __cnfn convert_long3_sat_rtp(long3);
-long3 __ovld __cnfn convert_long3_rtn(long3);
-long3 __ovld __cnfn convert_long3_sat_rtn(long3);
-long3 __ovld __cnfn convert_long3(long3);
-long3 __ovld __cnfn convert_long3_sat(long3);
-long3 __ovld __cnfn convert_long3_rte(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
-long3 __ovld __cnfn convert_long3_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_rtn(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
-long3 __ovld __cnfn convert_long3(ulong3);
-long3 __ovld __cnfn convert_long3_sat(ulong3);
-long3 __ovld __cnfn convert_long3_rte(float3);
-long3 __ovld __cnfn convert_long3_sat_rte(float3);
-long3 __ovld __cnfn convert_long3_rtz(float3);
-long3 __ovld __cnfn convert_long3_sat_rtz(float3);
-long3 __ovld __cnfn convert_long3_rtp(float3);
-long3 __ovld __cnfn convert_long3_sat_rtp(float3);
-long3 __ovld __cnfn convert_long3_rtn(float3);
-long3 __ovld __cnfn convert_long3_sat_rtn(float3);
-long3 __ovld __cnfn convert_long3(float3);
-long3 __ovld __cnfn convert_long3_sat(float3);
-ulong3 __ovld __cnfn convert_ulong3_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat(char3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat(short3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat(int3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat(long3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat(float3);
-float3 __ovld __cnfn convert_float3_rte(char3);
-float3 __ovld __cnfn convert_float3_rtz(char3);
-float3 __ovld __cnfn convert_float3_rtp(char3);
-float3 __ovld __cnfn convert_float3_rtn(char3);
-float3 __ovld __cnfn convert_float3(char3);
-float3 __ovld __cnfn convert_float3_rte(uchar3);
-float3 __ovld __cnfn convert_float3_rtz(uchar3);
-float3 __ovld __cnfn convert_float3_rtp(uchar3);
-float3 __ovld __cnfn convert_float3_rtn(uchar3);
-float3 __ovld __cnfn convert_float3(uchar3);
-float3 __ovld __cnfn convert_float3_rte(short3);
-float3 __ovld __cnfn convert_float3_rtz(short3);
-float3 __ovld __cnfn convert_float3_rtp(short3);
-float3 __ovld __cnfn convert_float3_rtn(short3);
-float3 __ovld __cnfn convert_float3(short3);
-float3 __ovld __cnfn convert_float3_rte(ushort3);
-float3 __ovld __cnfn convert_float3_rtz(ushort3);
-float3 __ovld __cnfn convert_float3_rtp(ushort3);
-float3 __ovld __cnfn convert_float3_rtn(ushort3);
-float3 __ovld __cnfn convert_float3(ushort3);
-float3 __ovld __cnfn convert_float3_rte(int3);
-float3 __ovld __cnfn convert_float3_rtz(int3);
-float3 __ovld __cnfn convert_float3_rtp(int3);
-float3 __ovld __cnfn convert_float3_rtn(int3);
-float3 __ovld __cnfn convert_float3(int3);
-float3 __ovld __cnfn convert_float3_rte(uint3);
-float3 __ovld __cnfn convert_float3_rtz(uint3);
-float3 __ovld __cnfn convert_float3_rtp(uint3);
-float3 __ovld __cnfn convert_float3_rtn(uint3);
-float3 __ovld __cnfn convert_float3(uint3);
-float3 __ovld __cnfn convert_float3_rte(long3);
-float3 __ovld __cnfn convert_float3_rtz(long3);
-float3 __ovld __cnfn convert_float3_rtp(long3);
-float3 __ovld __cnfn convert_float3_rtn(long3);
-float3 __ovld __cnfn convert_float3(long3);
-float3 __ovld __cnfn convert_float3_rte(ulong3);
-float3 __ovld __cnfn convert_float3_rtz(ulong3);
-float3 __ovld __cnfn convert_float3_rtp(ulong3);
-float3 __ovld __cnfn convert_float3_rtn(ulong3);
-float3 __ovld __cnfn convert_float3(ulong3);
-float3 __ovld __cnfn convert_float3_rte(float3);
-float3 __ovld __cnfn convert_float3_rtz(float3);
-float3 __ovld __cnfn convert_float3_rtp(float3);
-float3 __ovld __cnfn convert_float3_rtn(float3);
-float3 __ovld __cnfn convert_float3(float3);
-char4 __ovld __cnfn convert_char4_rte(char4);
-char4 __ovld __cnfn convert_char4_sat_rte(char4);
-char4 __ovld __cnfn convert_char4_rtz(char4);
-char4 __ovld __cnfn convert_char4_sat_rtz(char4);
-char4 __ovld __cnfn convert_char4_rtp(char4);
-char4 __ovld __cnfn convert_char4_sat_rtp(char4);
-char4 __ovld __cnfn convert_char4_rtn(char4);
-char4 __ovld __cnfn convert_char4_sat_rtn(char4);
-char4 __ovld __cnfn convert_char4(char4);
-char4 __ovld __cnfn convert_char4_sat(char4);
-char4 __ovld __cnfn convert_char4_rte(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
-char4 __ovld __cnfn convert_char4_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_rtn(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
-char4 __ovld __cnfn convert_char4(uchar4);
-char4 __ovld __cnfn convert_char4_sat(uchar4);
-char4 __ovld __cnfn convert_char4_rte(short4);
-char4 __ovld __cnfn convert_char4_sat_rte(short4);
-char4 __ovld __cnfn convert_char4_rtz(short4);
-char4 __ovld __cnfn convert_char4_sat_rtz(short4);
-char4 __ovld __cnfn convert_char4_rtp(short4);
-char4 __ovld __cnfn convert_char4_sat_rtp(short4);
-char4 __ovld __cnfn convert_char4_rtn(short4);
-char4 __ovld __cnfn convert_char4_sat_rtn(short4);
-char4 __ovld __cnfn convert_char4(short4);
-char4 __ovld __cnfn convert_char4_sat(short4);
-char4 __ovld __cnfn convert_char4_rte(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
-char4 __ovld __cnfn convert_char4_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_rtn(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
-char4 __ovld __cnfn convert_char4(ushort4);
-char4 __ovld __cnfn convert_char4_sat(ushort4);
-char4 __ovld __cnfn convert_char4_rte(int4);
-char4 __ovld __cnfn convert_char4_sat_rte(int4);
-char4 __ovld __cnfn convert_char4_rtz(int4);
-char4 __ovld __cnfn convert_char4_sat_rtz(int4);
-char4 __ovld __cnfn convert_char4_rtp(int4);
-char4 __ovld __cnfn convert_char4_sat_rtp(int4);
-char4 __ovld __cnfn convert_char4_rtn(int4);
-char4 __ovld __cnfn convert_char4_sat_rtn(int4);
-char4 __ovld __cnfn convert_char4(int4);
-char4 __ovld __cnfn convert_char4_sat(int4);
-char4 __ovld __cnfn convert_char4_rte(uint4);
-char4 __ovld __cnfn convert_char4_sat_rte(uint4);
-char4 __ovld __cnfn convert_char4_rtz(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
-char4 __ovld __cnfn convert_char4_rtp(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
-char4 __ovld __cnfn convert_char4_rtn(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
-char4 __ovld __cnfn convert_char4(uint4);
-char4 __ovld __cnfn convert_char4_sat(uint4);
-char4 __ovld __cnfn convert_char4_rte(long4);
-char4 __ovld __cnfn convert_char4_sat_rte(long4);
-char4 __ovld __cnfn convert_char4_rtz(long4);
-char4 __ovld __cnfn convert_char4_sat_rtz(long4);
-char4 __ovld __cnfn convert_char4_rtp(long4);
-char4 __ovld __cnfn convert_char4_sat_rtp(long4);
-char4 __ovld __cnfn convert_char4_rtn(long4);
-char4 __ovld __cnfn convert_char4_sat_rtn(long4);
-char4 __ovld __cnfn convert_char4(long4);
-char4 __ovld __cnfn convert_char4_sat(long4);
-char4 __ovld __cnfn convert_char4_rte(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
-char4 __ovld __cnfn convert_char4_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_rtn(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
-char4 __ovld __cnfn convert_char4(ulong4);
-char4 __ovld __cnfn convert_char4_sat(ulong4);
-char4 __ovld __cnfn convert_char4_rte(float4);
-char4 __ovld __cnfn convert_char4_sat_rte(float4);
-char4 __ovld __cnfn convert_char4_rtz(float4);
-char4 __ovld __cnfn convert_char4_sat_rtz(float4);
-char4 __ovld __cnfn convert_char4_rtp(float4);
-char4 __ovld __cnfn convert_char4_sat_rtp(float4);
-char4 __ovld __cnfn convert_char4_rtn(float4);
-char4 __ovld __cnfn convert_char4_sat_rtn(float4);
-char4 __ovld __cnfn convert_char4(float4);
-char4 __ovld __cnfn convert_char4_sat(float4);
-uchar4 __ovld __cnfn convert_uchar4_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat(char4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat(short4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat(int4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat(long4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat(float4);
-short4 __ovld __cnfn convert_short4_rte(char4);
-short4 __ovld __cnfn convert_short4_sat_rte(char4);
-short4 __ovld __cnfn convert_short4_rtz(char4);
-short4 __ovld __cnfn convert_short4_sat_rtz(char4);
-short4 __ovld __cnfn convert_short4_rtp(char4);
-short4 __ovld __cnfn convert_short4_sat_rtp(char4);
-short4 __ovld __cnfn convert_short4_rtn(char4);
-short4 __ovld __cnfn convert_short4_sat_rtn(char4);
-short4 __ovld __cnfn convert_short4(char4);
-short4 __ovld __cnfn convert_short4_sat(char4);
-short4 __ovld __cnfn convert_short4_rte(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
-short4 __ovld __cnfn convert_short4_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_rtn(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
-short4 __ovld __cnfn convert_short4(uchar4);
-short4 __ovld __cnfn convert_short4_sat(uchar4);
-short4 __ovld __cnfn convert_short4_rte(short4);
-short4 __ovld __cnfn convert_short4_sat_rte(short4);
-short4 __ovld __cnfn convert_short4_rtz(short4);
-short4 __ovld __cnfn convert_short4_sat_rtz(short4);
-short4 __ovld __cnfn convert_short4_rtp(short4);
-short4 __ovld __cnfn convert_short4_sat_rtp(short4);
-short4 __ovld __cnfn convert_short4_rtn(short4);
-short4 __ovld __cnfn convert_short4_sat_rtn(short4);
-short4 __ovld __cnfn convert_short4(short4);
-short4 __ovld __cnfn convert_short4_sat(short4);
-short4 __ovld __cnfn convert_short4_rte(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
-short4 __ovld __cnfn convert_short4_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_rtn(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
-short4 __ovld __cnfn convert_short4(ushort4);
-short4 __ovld __cnfn convert_short4_sat(ushort4);
-short4 __ovld __cnfn convert_short4_rte(int4);
-short4 __ovld __cnfn convert_short4_sat_rte(int4);
-short4 __ovld __cnfn convert_short4_rtz(int4);
-short4 __ovld __cnfn convert_short4_sat_rtz(int4);
-short4 __ovld __cnfn convert_short4_rtp(int4);
-short4 __ovld __cnfn convert_short4_sat_rtp(int4);
-short4 __ovld __cnfn convert_short4_rtn(int4);
-short4 __ovld __cnfn convert_short4_sat_rtn(int4);
-short4 __ovld __cnfn convert_short4(int4);
-short4 __ovld __cnfn convert_short4_sat(int4);
-short4 __ovld __cnfn convert_short4_rte(uint4);
-short4 __ovld __cnfn convert_short4_sat_rte(uint4);
-short4 __ovld __cnfn convert_short4_rtz(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
-short4 __ovld __cnfn convert_short4_rtp(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
-short4 __ovld __cnfn convert_short4_rtn(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
-short4 __ovld __cnfn convert_short4(uint4);
-short4 __ovld __cnfn convert_short4_sat(uint4);
-short4 __ovld __cnfn convert_short4_rte(long4);
-short4 __ovld __cnfn convert_short4_sat_rte(long4);
-short4 __ovld __cnfn convert_short4_rtz(long4);
-short4 __ovld __cnfn convert_short4_sat_rtz(long4);
-short4 __ovld __cnfn convert_short4_rtp(long4);
-short4 __ovld __cnfn convert_short4_sat_rtp(long4);
-short4 __ovld __cnfn convert_short4_rtn(long4);
-short4 __ovld __cnfn convert_short4_sat_rtn(long4);
-short4 __ovld __cnfn convert_short4(long4);
-short4 __ovld __cnfn convert_short4_sat(long4);
-short4 __ovld __cnfn convert_short4_rte(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
-short4 __ovld __cnfn convert_short4_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_rtn(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
-short4 __ovld __cnfn convert_short4(ulong4);
-short4 __ovld __cnfn convert_short4_sat(ulong4);
-short4 __ovld __cnfn convert_short4_rte(float4);
-short4 __ovld __cnfn convert_short4_sat_rte(float4);
-short4 __ovld __cnfn convert_short4_rtz(float4);
-short4 __ovld __cnfn convert_short4_sat_rtz(float4);
-short4 __ovld __cnfn convert_short4_rtp(float4);
-short4 __ovld __cnfn convert_short4_sat_rtp(float4);
-short4 __ovld __cnfn convert_short4_rtn(float4);
-short4 __ovld __cnfn convert_short4_sat_rtn(float4);
-short4 __ovld __cnfn convert_short4(float4);
-short4 __ovld __cnfn convert_short4_sat(float4);
-ushort4 __ovld __cnfn convert_ushort4_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat(char4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat(short4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat(int4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat(long4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat(float4);
-int4 __ovld __cnfn convert_int4_rte(char4);
-int4 __ovld __cnfn convert_int4_sat_rte(char4);
-int4 __ovld __cnfn convert_int4_rtz(char4);
-int4 __ovld __cnfn convert_int4_sat_rtz(char4);
-int4 __ovld __cnfn convert_int4_rtp(char4);
-int4 __ovld __cnfn convert_int4_sat_rtp(char4);
-int4 __ovld __cnfn convert_int4_rtn(char4);
-int4 __ovld __cnfn convert_int4_sat_rtn(char4);
-int4 __ovld __cnfn convert_int4(char4);
-int4 __ovld __cnfn convert_int4_sat(char4);
-int4 __ovld __cnfn convert_int4_rte(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
-int4 __ovld __cnfn convert_int4_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_rtn(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
-int4 __ovld __cnfn convert_int4(uchar4);
-int4 __ovld __cnfn convert_int4_sat(uchar4);
-int4 __ovld __cnfn convert_int4_rte(short4);
-int4 __ovld __cnfn convert_int4_sat_rte(short4);
-int4 __ovld __cnfn convert_int4_rtz(short4);
-int4 __ovld __cnfn convert_int4_sat_rtz(short4);
-int4 __ovld __cnfn convert_int4_rtp(short4);
-int4 __ovld __cnfn convert_int4_sat_rtp(short4);
-int4 __ovld __cnfn convert_int4_rtn(short4);
-int4 __ovld __cnfn convert_int4_sat_rtn(short4);
-int4 __ovld __cnfn convert_int4(short4);
-int4 __ovld __cnfn convert_int4_sat(short4);
-int4 __ovld __cnfn convert_int4_rte(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
-int4 __ovld __cnfn convert_int4_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_rtn(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
-int4 __ovld __cnfn convert_int4(ushort4);
-int4 __ovld __cnfn convert_int4_sat(ushort4);
-int4 __ovld __cnfn convert_int4_rte(int4);
-int4 __ovld __cnfn convert_int4_sat_rte(int4);
-int4 __ovld __cnfn convert_int4_rtz(int4);
-int4 __ovld __cnfn convert_int4_sat_rtz(int4);
-int4 __ovld __cnfn convert_int4_rtp(int4);
-int4 __ovld __cnfn convert_int4_sat_rtp(int4);
-int4 __ovld __cnfn convert_int4_rtn(int4);
-int4 __ovld __cnfn convert_int4_sat_rtn(int4);
-int4 __ovld __cnfn convert_int4(int4);
-int4 __ovld __cnfn convert_int4_sat(int4);
-int4 __ovld __cnfn convert_int4_rte(uint4);
-int4 __ovld __cnfn convert_int4_sat_rte(uint4);
-int4 __ovld __cnfn convert_int4_rtz(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
-int4 __ovld __cnfn convert_int4_rtp(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
-int4 __ovld __cnfn convert_int4_rtn(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
-int4 __ovld __cnfn convert_int4(uint4);
-int4 __ovld __cnfn convert_int4_sat(uint4);
-int4 __ovld __cnfn convert_int4_rte(long4);
-int4 __ovld __cnfn convert_int4_sat_rte(long4);
-int4 __ovld __cnfn convert_int4_rtz(long4);
-int4 __ovld __cnfn convert_int4_sat_rtz(long4);
-int4 __ovld __cnfn convert_int4_rtp(long4);
-int4 __ovld __cnfn convert_int4_sat_rtp(long4);
-int4 __ovld __cnfn convert_int4_rtn(long4);
-int4 __ovld __cnfn convert_int4_sat_rtn(long4);
-int4 __ovld __cnfn convert_int4(long4);
-int4 __ovld __cnfn convert_int4_sat(long4);
-int4 __ovld __cnfn convert_int4_rte(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
-int4 __ovld __cnfn convert_int4_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_rtn(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
-int4 __ovld __cnfn convert_int4(ulong4);
-int4 __ovld __cnfn convert_int4_sat(ulong4);
-int4 __ovld __cnfn convert_int4_rte(float4);
-int4 __ovld __cnfn convert_int4_sat_rte(float4);
-int4 __ovld __cnfn convert_int4_rtz(float4);
-int4 __ovld __cnfn convert_int4_sat_rtz(float4);
-int4 __ovld __cnfn convert_int4_rtp(float4);
-int4 __ovld __cnfn convert_int4_sat_rtp(float4);
-int4 __ovld __cnfn convert_int4_rtn(float4);
-int4 __ovld __cnfn convert_int4_sat_rtn(float4);
-int4 __ovld __cnfn convert_int4(float4);
-int4 __ovld __cnfn convert_int4_sat(float4);
-uint4 __ovld __cnfn convert_uint4_rte(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
-uint4 __ovld __cnfn convert_uint4_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_rtn(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
-uint4 __ovld __cnfn convert_uint4(char4);
-uint4 __ovld __cnfn convert_uint4_sat(char4);
-uint4 __ovld __cnfn convert_uint4_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat(uchar4);
-uint4 __ovld __cnfn convert_uint4_rte(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
-uint4 __ovld __cnfn convert_uint4_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_rtn(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
-uint4 __ovld __cnfn convert_uint4(short4);
-uint4 __ovld __cnfn convert_uint4_sat(short4);
-uint4 __ovld __cnfn convert_uint4_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat(ushort4);
-uint4 __ovld __cnfn convert_uint4_rte(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
-uint4 __ovld __cnfn convert_uint4_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_rtn(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
-uint4 __ovld __cnfn convert_uint4(int4);
-uint4 __ovld __cnfn convert_uint4_sat(int4);
-uint4 __ovld __cnfn convert_uint4_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4(uint4);
-uint4 __ovld __cnfn convert_uint4_sat(uint4);
-uint4 __ovld __cnfn convert_uint4_rte(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
-uint4 __ovld __cnfn convert_uint4_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_rtn(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
-uint4 __ovld __cnfn convert_uint4(long4);
-uint4 __ovld __cnfn convert_uint4_sat(long4);
-uint4 __ovld __cnfn convert_uint4_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat(ulong4);
-uint4 __ovld __cnfn convert_uint4_rte(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
-uint4 __ovld __cnfn convert_uint4_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_rtn(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
-uint4 __ovld __cnfn convert_uint4(float4);
-uint4 __ovld __cnfn convert_uint4_sat(float4);
-long4 __ovld __cnfn convert_long4_rte(char4);
-long4 __ovld __cnfn convert_long4_sat_rte(char4);
-long4 __ovld __cnfn convert_long4_rtz(char4);
-long4 __ovld __cnfn convert_long4_sat_rtz(char4);
-long4 __ovld __cnfn convert_long4_rtp(char4);
-long4 __ovld __cnfn convert_long4_sat_rtp(char4);
-long4 __ovld __cnfn convert_long4_rtn(char4);
-long4 __ovld __cnfn convert_long4_sat_rtn(char4);
-long4 __ovld __cnfn convert_long4(char4);
-long4 __ovld __cnfn convert_long4_sat(char4);
-long4 __ovld __cnfn convert_long4_rte(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
-long4 __ovld __cnfn convert_long4_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_rtn(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
-long4 __ovld __cnfn convert_long4(uchar4);
-long4 __ovld __cnfn convert_long4_sat(uchar4);
-long4 __ovld __cnfn convert_long4_rte(short4);
-long4 __ovld __cnfn convert_long4_sat_rte(short4);
-long4 __ovld __cnfn convert_long4_rtz(short4);
-long4 __ovld __cnfn convert_long4_sat_rtz(short4);
-long4 __ovld __cnfn convert_long4_rtp(short4);
-long4 __ovld __cnfn convert_long4_sat_rtp(short4);
-long4 __ovld __cnfn convert_long4_rtn(short4);
-long4 __ovld __cnfn convert_long4_sat_rtn(short4);
-long4 __ovld __cnfn convert_long4(short4);
-long4 __ovld __cnfn convert_long4_sat(short4);
-long4 __ovld __cnfn convert_long4_rte(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
-long4 __ovld __cnfn convert_long4_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_rtn(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
-long4 __ovld __cnfn convert_long4(ushort4);
-long4 __ovld __cnfn convert_long4_sat(ushort4);
-long4 __ovld __cnfn convert_long4_rte(int4);
-long4 __ovld __cnfn convert_long4_sat_rte(int4);
-long4 __ovld __cnfn convert_long4_rtz(int4);
-long4 __ovld __cnfn convert_long4_sat_rtz(int4);
-long4 __ovld __cnfn convert_long4_rtp(int4);
-long4 __ovld __cnfn convert_long4_sat_rtp(int4);
-long4 __ovld __cnfn convert_long4_rtn(int4);
-long4 __ovld __cnfn convert_long4_sat_rtn(int4);
-long4 __ovld __cnfn convert_long4(int4);
-long4 __ovld __cnfn convert_long4_sat(int4);
-long4 __ovld __cnfn convert_long4_rte(uint4);
-long4 __ovld __cnfn convert_long4_sat_rte(uint4);
-long4 __ovld __cnfn convert_long4_rtz(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
-long4 __ovld __cnfn convert_long4_rtp(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
-long4 __ovld __cnfn convert_long4_rtn(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
-long4 __ovld __cnfn convert_long4(uint4);
-long4 __ovld __cnfn convert_long4_sat(uint4);
-long4 __ovld __cnfn convert_long4_rte(long4);
-long4 __ovld __cnfn convert_long4_sat_rte(long4);
-long4 __ovld __cnfn convert_long4_rtz(long4);
-long4 __ovld __cnfn convert_long4_sat_rtz(long4);
-long4 __ovld __cnfn convert_long4_rtp(long4);
-long4 __ovld __cnfn convert_long4_sat_rtp(long4);
-long4 __ovld __cnfn convert_long4_rtn(long4);
-long4 __ovld __cnfn convert_long4_sat_rtn(long4);
-long4 __ovld __cnfn convert_long4(long4);
-long4 __ovld __cnfn convert_long4_sat(long4);
-long4 __ovld __cnfn convert_long4_rte(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
-long4 __ovld __cnfn convert_long4_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_rtn(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
-long4 __ovld __cnfn convert_long4(ulong4);
-long4 __ovld __cnfn convert_long4_sat(ulong4);
-long4 __ovld __cnfn convert_long4_rte(float4);
-long4 __ovld __cnfn convert_long4_sat_rte(float4);
-long4 __ovld __cnfn convert_long4_rtz(float4);
-long4 __ovld __cnfn convert_long4_sat_rtz(float4);
-long4 __ovld __cnfn convert_long4_rtp(float4);
-long4 __ovld __cnfn convert_long4_sat_rtp(float4);
-long4 __ovld __cnfn convert_long4_rtn(float4);
-long4 __ovld __cnfn convert_long4_sat_rtn(float4);
-long4 __ovld __cnfn convert_long4(float4);
-long4 __ovld __cnfn convert_long4_sat(float4);
-ulong4 __ovld __cnfn convert_ulong4_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat(char4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat(short4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat(int4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat(long4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat(float4);
-float4 __ovld __cnfn convert_float4_rte(char4);
-float4 __ovld __cnfn convert_float4_rtz(char4);
-float4 __ovld __cnfn convert_float4_rtp(char4);
-float4 __ovld __cnfn convert_float4_rtn(char4);
-float4 __ovld __cnfn convert_float4(char4);
-float4 __ovld __cnfn convert_float4_rte(uchar4);
-float4 __ovld __cnfn convert_float4_rtz(uchar4);
-float4 __ovld __cnfn convert_float4_rtp(uchar4);
-float4 __ovld __cnfn convert_float4_rtn(uchar4);
-float4 __ovld __cnfn convert_float4(uchar4);
-float4 __ovld __cnfn convert_float4_rte(short4);
-float4 __ovld __cnfn convert_float4_rtz(short4);
-float4 __ovld __cnfn convert_float4_rtp(short4);
-float4 __ovld __cnfn convert_float4_rtn(short4);
-float4 __ovld __cnfn convert_float4(short4);
-float4 __ovld __cnfn convert_float4_rte(ushort4);
-float4 __ovld __cnfn convert_float4_rtz(ushort4);
-float4 __ovld __cnfn convert_float4_rtp(ushort4);
-float4 __ovld __cnfn convert_float4_rtn(ushort4);
-float4 __ovld __cnfn convert_float4(ushort4);
-float4 __ovld __cnfn convert_float4_rte(int4);
-float4 __ovld __cnfn convert_float4_rtz(int4);
-float4 __ovld __cnfn convert_float4_rtp(int4);
-float4 __ovld __cnfn convert_float4_rtn(int4);
-float4 __ovld __cnfn convert_float4(int4);
-float4 __ovld __cnfn convert_float4_rte(uint4);
-float4 __ovld __cnfn convert_float4_rtz(uint4);
-float4 __ovld __cnfn convert_float4_rtp(uint4);
-float4 __ovld __cnfn convert_float4_rtn(uint4);
-float4 __ovld __cnfn convert_float4(uint4);
-float4 __ovld __cnfn convert_float4_rte(long4);
-float4 __ovld __cnfn convert_float4_rtz(long4);
-float4 __ovld __cnfn convert_float4_rtp(long4);
-float4 __ovld __cnfn convert_float4_rtn(long4);
-float4 __ovld __cnfn convert_float4(long4);
-float4 __ovld __cnfn convert_float4_rte(ulong4);
-float4 __ovld __cnfn convert_float4_rtz(ulong4);
-float4 __ovld __cnfn convert_float4_rtp(ulong4);
-float4 __ovld __cnfn convert_float4_rtn(ulong4);
-float4 __ovld __cnfn convert_float4(ulong4);
-float4 __ovld __cnfn convert_float4_rte(float4);
-float4 __ovld __cnfn convert_float4_rtz(float4);
-float4 __ovld __cnfn convert_float4_rtp(float4);
-float4 __ovld __cnfn convert_float4_rtn(float4);
-float4 __ovld __cnfn convert_float4(float4);
-char8 __ovld __cnfn convert_char8_rte(char8);
-char8 __ovld __cnfn convert_char8_sat_rte(char8);
-char8 __ovld __cnfn convert_char8_rtz(char8);
-char8 __ovld __cnfn convert_char8_sat_rtz(char8);
-char8 __ovld __cnfn convert_char8_rtp(char8);
-char8 __ovld __cnfn convert_char8_sat_rtp(char8);
-char8 __ovld __cnfn convert_char8_rtn(char8);
-char8 __ovld __cnfn convert_char8_sat_rtn(char8);
-char8 __ovld __cnfn convert_char8(char8);
-char8 __ovld __cnfn convert_char8_sat(char8);
-char8 __ovld __cnfn convert_char8_rte(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
-char8 __ovld __cnfn convert_char8_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_rtn(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
-char8 __ovld __cnfn convert_char8(uchar8);
-char8 __ovld __cnfn convert_char8_sat(uchar8);
-char8 __ovld __cnfn convert_char8_rte(short8);
-char8 __ovld __cnfn convert_char8_sat_rte(short8);
-char8 __ovld __cnfn convert_char8_rtz(short8);
-char8 __ovld __cnfn convert_char8_sat_rtz(short8);
-char8 __ovld __cnfn convert_char8_rtp(short8);
-char8 __ovld __cnfn convert_char8_sat_rtp(short8);
-char8 __ovld __cnfn convert_char8_rtn(short8);
-char8 __ovld __cnfn convert_char8_sat_rtn(short8);
-char8 __ovld __cnfn convert_char8(short8);
-char8 __ovld __cnfn convert_char8_sat(short8);
-char8 __ovld __cnfn convert_char8_rte(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
-char8 __ovld __cnfn convert_char8_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_rtn(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
-char8 __ovld __cnfn convert_char8(ushort8);
-char8 __ovld __cnfn convert_char8_sat(ushort8);
-char8 __ovld __cnfn convert_char8_rte(int8);
-char8 __ovld __cnfn convert_char8_sat_rte(int8);
-char8 __ovld __cnfn convert_char8_rtz(int8);
-char8 __ovld __cnfn convert_char8_sat_rtz(int8);
-char8 __ovld __cnfn convert_char8_rtp(int8);
-char8 __ovld __cnfn convert_char8_sat_rtp(int8);
-char8 __ovld __cnfn convert_char8_rtn(int8);
-char8 __ovld __cnfn convert_char8_sat_rtn(int8);
-char8 __ovld __cnfn convert_char8(int8);
-char8 __ovld __cnfn convert_char8_sat(int8);
-char8 __ovld __cnfn convert_char8_rte(uint8);
-char8 __ovld __cnfn convert_char8_sat_rte(uint8);
-char8 __ovld __cnfn convert_char8_rtz(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
-char8 __ovld __cnfn convert_char8_rtp(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
-char8 __ovld __cnfn convert_char8_rtn(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
-char8 __ovld __cnfn convert_char8(uint8);
-char8 __ovld __cnfn convert_char8_sat(uint8);
-char8 __ovld __cnfn convert_char8_rte(long8);
-char8 __ovld __cnfn convert_char8_sat_rte(long8);
-char8 __ovld __cnfn convert_char8_rtz(long8);
-char8 __ovld __cnfn convert_char8_sat_rtz(long8);
-char8 __ovld __cnfn convert_char8_rtp(long8);
-char8 __ovld __cnfn convert_char8_sat_rtp(long8);
-char8 __ovld __cnfn convert_char8_rtn(long8);
-char8 __ovld __cnfn convert_char8_sat_rtn(long8);
-char8 __ovld __cnfn convert_char8(long8);
-char8 __ovld __cnfn convert_char8_sat(long8);
-char8 __ovld __cnfn convert_char8_rte(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
-char8 __ovld __cnfn convert_char8_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_rtn(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
-char8 __ovld __cnfn convert_char8(ulong8);
-char8 __ovld __cnfn convert_char8_sat(ulong8);
-char8 __ovld __cnfn convert_char8_rte(float8);
-char8 __ovld __cnfn convert_char8_sat_rte(float8);
-char8 __ovld __cnfn convert_char8_rtz(float8);
-char8 __ovld __cnfn convert_char8_sat_rtz(float8);
-char8 __ovld __cnfn convert_char8_rtp(float8);
-char8 __ovld __cnfn convert_char8_sat_rtp(float8);
-char8 __ovld __cnfn convert_char8_rtn(float8);
-char8 __ovld __cnfn convert_char8_sat_rtn(float8);
-char8 __ovld __cnfn convert_char8(float8);
-char8 __ovld __cnfn convert_char8_sat(float8);
-uchar8 __ovld __cnfn convert_uchar8_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat(char8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat(short8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat(int8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat(long8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat(float8);
-short8 __ovld __cnfn convert_short8_rte(char8);
-short8 __ovld __cnfn convert_short8_sat_rte(char8);
-short8 __ovld __cnfn convert_short8_rtz(char8);
-short8 __ovld __cnfn convert_short8_sat_rtz(char8);
-short8 __ovld __cnfn convert_short8_rtp(char8);
-short8 __ovld __cnfn convert_short8_sat_rtp(char8);
-short8 __ovld __cnfn convert_short8_rtn(char8);
-short8 __ovld __cnfn convert_short8_sat_rtn(char8);
-short8 __ovld __cnfn convert_short8(char8);
-short8 __ovld __cnfn convert_short8_sat(char8);
-short8 __ovld __cnfn convert_short8_rte(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
-short8 __ovld __cnfn convert_short8_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_rtn(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
-short8 __ovld __cnfn convert_short8(uchar8);
-short8 __ovld __cnfn convert_short8_sat(uchar8);
-short8 __ovld __cnfn convert_short8_rte(short8);
-short8 __ovld __cnfn convert_short8_sat_rte(short8);
-short8 __ovld __cnfn convert_short8_rtz(short8);
-short8 __ovld __cnfn convert_short8_sat_rtz(short8);
-short8 __ovld __cnfn convert_short8_rtp(short8);
-short8 __ovld __cnfn convert_short8_sat_rtp(short8);
-short8 __ovld __cnfn convert_short8_rtn(short8);
-short8 __ovld __cnfn convert_short8_sat_rtn(short8);
-short8 __ovld __cnfn convert_short8(short8);
-short8 __ovld __cnfn convert_short8_sat(short8);
-short8 __ovld __cnfn convert_short8_rte(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
-short8 __ovld __cnfn convert_short8_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_rtn(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
-short8 __ovld __cnfn convert_short8(ushort8);
-short8 __ovld __cnfn convert_short8_sat(ushort8);
-short8 __ovld __cnfn convert_short8_rte(int8);
-short8 __ovld __cnfn convert_short8_sat_rte(int8);
-short8 __ovld __cnfn convert_short8_rtz(int8);
-short8 __ovld __cnfn convert_short8_sat_rtz(int8);
-short8 __ovld __cnfn convert_short8_rtp(int8);
-short8 __ovld __cnfn convert_short8_sat_rtp(int8);
-short8 __ovld __cnfn convert_short8_rtn(int8);
-short8 __ovld __cnfn convert_short8_sat_rtn(int8);
-short8 __ovld __cnfn convert_short8(int8);
-short8 __ovld __cnfn convert_short8_sat(int8);
-short8 __ovld __cnfn convert_short8_rte(uint8);
-short8 __ovld __cnfn convert_short8_sat_rte(uint8);
-short8 __ovld __cnfn convert_short8_rtz(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
-short8 __ovld __cnfn convert_short8_rtp(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
-short8 __ovld __cnfn convert_short8_rtn(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
-short8 __ovld __cnfn convert_short8(uint8);
-short8 __ovld __cnfn convert_short8_sat(uint8);
-short8 __ovld __cnfn convert_short8_rte(long8);
-short8 __ovld __cnfn convert_short8_sat_rte(long8);
-short8 __ovld __cnfn convert_short8_rtz(long8);
-short8 __ovld __cnfn convert_short8_sat_rtz(long8);
-short8 __ovld __cnfn convert_short8_rtp(long8);
-short8 __ovld __cnfn convert_short8_sat_rtp(long8);
-short8 __ovld __cnfn convert_short8_rtn(long8);
-short8 __ovld __cnfn convert_short8_sat_rtn(long8);
-short8 __ovld __cnfn convert_short8(long8);
-short8 __ovld __cnfn convert_short8_sat(long8);
-short8 __ovld __cnfn convert_short8_rte(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
-short8 __ovld __cnfn convert_short8_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_rtn(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
-short8 __ovld __cnfn convert_short8(ulong8);
-short8 __ovld __cnfn convert_short8_sat(ulong8);
-short8 __ovld __cnfn convert_short8_rte(float8);
-short8 __ovld __cnfn convert_short8_sat_rte(float8);
-short8 __ovld __cnfn convert_short8_rtz(float8);
-short8 __ovld __cnfn convert_short8_sat_rtz(float8);
-short8 __ovld __cnfn convert_short8_rtp(float8);
-short8 __ovld __cnfn convert_short8_sat_rtp(float8);
-short8 __ovld __cnfn convert_short8_rtn(float8);
-short8 __ovld __cnfn convert_short8_sat_rtn(float8);
-short8 __ovld __cnfn convert_short8(float8);
-short8 __ovld __cnfn convert_short8_sat(float8);
-ushort8 __ovld __cnfn convert_ushort8_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat(char8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat(short8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat(int8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat(long8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat(float8);
-int8 __ovld __cnfn convert_int8_rte(char8);
-int8 __ovld __cnfn convert_int8_sat_rte(char8);
-int8 __ovld __cnfn convert_int8_rtz(char8);
-int8 __ovld __cnfn convert_int8_sat_rtz(char8);
-int8 __ovld __cnfn convert_int8_rtp(char8);
-int8 __ovld __cnfn convert_int8_sat_rtp(char8);
-int8 __ovld __cnfn convert_int8_rtn(char8);
-int8 __ovld __cnfn convert_int8_sat_rtn(char8);
-int8 __ovld __cnfn convert_int8(char8);
-int8 __ovld __cnfn convert_int8_sat(char8);
-int8 __ovld __cnfn convert_int8_rte(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
-int8 __ovld __cnfn convert_int8_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_rtn(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
-int8 __ovld __cnfn convert_int8(uchar8);
-int8 __ovld __cnfn convert_int8_sat(uchar8);
-int8 __ovld __cnfn convert_int8_rte(short8);
-int8 __ovld __cnfn convert_int8_sat_rte(short8);
-int8 __ovld __cnfn convert_int8_rtz(short8);
-int8 __ovld __cnfn convert_int8_sat_rtz(short8);
-int8 __ovld __cnfn convert_int8_rtp(short8);
-int8 __ovld __cnfn convert_int8_sat_rtp(short8);
-int8 __ovld __cnfn convert_int8_rtn(short8);
-int8 __ovld __cnfn convert_int8_sat_rtn(short8);
-int8 __ovld __cnfn convert_int8(short8);
-int8 __ovld __cnfn convert_int8_sat(short8);
-int8 __ovld __cnfn convert_int8_rte(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
-int8 __ovld __cnfn convert_int8_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_rtn(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
-int8 __ovld __cnfn convert_int8(ushort8);
-int8 __ovld __cnfn convert_int8_sat(ushort8);
-int8 __ovld __cnfn convert_int8_rte(int8);
-int8 __ovld __cnfn convert_int8_sat_rte(int8);
-int8 __ovld __cnfn convert_int8_rtz(int8);
-int8 __ovld __cnfn convert_int8_sat_rtz(int8);
-int8 __ovld __cnfn convert_int8_rtp(int8);
-int8 __ovld __cnfn convert_int8_sat_rtp(int8);
-int8 __ovld __cnfn convert_int8_rtn(int8);
-int8 __ovld __cnfn convert_int8_sat_rtn(int8);
-int8 __ovld __cnfn convert_int8(int8);
-int8 __ovld __cnfn convert_int8_sat(int8);
-int8 __ovld __cnfn convert_int8_rte(uint8);
-int8 __ovld __cnfn convert_int8_sat_rte(uint8);
-int8 __ovld __cnfn convert_int8_rtz(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
-int8 __ovld __cnfn convert_int8_rtp(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
-int8 __ovld __cnfn convert_int8_rtn(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
-int8 __ovld __cnfn convert_int8(uint8);
-int8 __ovld __cnfn convert_int8_sat(uint8);
-int8 __ovld __cnfn convert_int8_rte(long8);
-int8 __ovld __cnfn convert_int8_sat_rte(long8);
-int8 __ovld __cnfn convert_int8_rtz(long8);
-int8 __ovld __cnfn convert_int8_sat_rtz(long8);
-int8 __ovld __cnfn convert_int8_rtp(long8);
-int8 __ovld __cnfn convert_int8_sat_rtp(long8);
-int8 __ovld __cnfn convert_int8_rtn(long8);
-int8 __ovld __cnfn convert_int8_sat_rtn(long8);
-int8 __ovld __cnfn convert_int8(long8);
-int8 __ovld __cnfn convert_int8_sat(long8);
-int8 __ovld __cnfn convert_int8_rte(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
-int8 __ovld __cnfn convert_int8_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_rtn(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
-int8 __ovld __cnfn convert_int8(ulong8);
-int8 __ovld __cnfn convert_int8_sat(ulong8);
-int8 __ovld __cnfn convert_int8_rte(float8);
-int8 __ovld __cnfn convert_int8_sat_rte(float8);
-int8 __ovld __cnfn convert_int8_rtz(float8);
-int8 __ovld __cnfn convert_int8_sat_rtz(float8);
-int8 __ovld __cnfn convert_int8_rtp(float8);
-int8 __ovld __cnfn convert_int8_sat_rtp(float8);
-int8 __ovld __cnfn convert_int8_rtn(float8);
-int8 __ovld __cnfn convert_int8_sat_rtn(float8);
-int8 __ovld __cnfn convert_int8(float8);
-int8 __ovld __cnfn convert_int8_sat(float8);
-uint8 __ovld __cnfn convert_uint8_rte(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
-uint8 __ovld __cnfn convert_uint8_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_rtn(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
-uint8 __ovld __cnfn convert_uint8(char8);
-uint8 __ovld __cnfn convert_uint8_sat(char8);
-uint8 __ovld __cnfn convert_uint8_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat(uchar8);
-uint8 __ovld __cnfn convert_uint8_rte(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
-uint8 __ovld __cnfn convert_uint8_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_rtn(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
-uint8 __ovld __cnfn convert_uint8(short8);
-uint8 __ovld __cnfn convert_uint8_sat(short8);
-uint8 __ovld __cnfn convert_uint8_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat(ushort8);
-uint8 __ovld __cnfn convert_uint8_rte(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
-uint8 __ovld __cnfn convert_uint8_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_rtn(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
-uint8 __ovld __cnfn convert_uint8(int8);
-uint8 __ovld __cnfn convert_uint8_sat(int8);
-uint8 __ovld __cnfn convert_uint8_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8(uint8);
-uint8 __ovld __cnfn convert_uint8_sat(uint8);
-uint8 __ovld __cnfn convert_uint8_rte(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
-uint8 __ovld __cnfn convert_uint8_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_rtn(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
-uint8 __ovld __cnfn convert_uint8(long8);
-uint8 __ovld __cnfn convert_uint8_sat(long8);
-uint8 __ovld __cnfn convert_uint8_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat(ulong8);
-uint8 __ovld __cnfn convert_uint8_rte(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
-uint8 __ovld __cnfn convert_uint8_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_rtn(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
-uint8 __ovld __cnfn convert_uint8(float8);
-uint8 __ovld __cnfn convert_uint8_sat(float8);
-long8 __ovld __cnfn convert_long8_rte(char8);
-long8 __ovld __cnfn convert_long8_sat_rte(char8);
-long8 __ovld __cnfn convert_long8_rtz(char8);
-long8 __ovld __cnfn convert_long8_sat_rtz(char8);
-long8 __ovld __cnfn convert_long8_rtp(char8);
-long8 __ovld __cnfn convert_long8_sat_rtp(char8);
-long8 __ovld __cnfn convert_long8_rtn(char8);
-long8 __ovld __cnfn convert_long8_sat_rtn(char8);
-long8 __ovld __cnfn convert_long8(char8);
-long8 __ovld __cnfn convert_long8_sat(char8);
-long8 __ovld __cnfn convert_long8_rte(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
-long8 __ovld __cnfn convert_long8_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_rtn(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
-long8 __ovld __cnfn convert_long8(uchar8);
-long8 __ovld __cnfn convert_long8_sat(uchar8);
-long8 __ovld __cnfn convert_long8_rte(short8);
-long8 __ovld __cnfn convert_long8_sat_rte(short8);
-long8 __ovld __cnfn convert_long8_rtz(short8);
-long8 __ovld __cnfn convert_long8_sat_rtz(short8);
-long8 __ovld __cnfn convert_long8_rtp(short8);
-long8 __ovld __cnfn convert_long8_sat_rtp(short8);
-long8 __ovld __cnfn convert_long8_rtn(short8);
-long8 __ovld __cnfn convert_long8_sat_rtn(short8);
-long8 __ovld __cnfn convert_long8(short8);
-long8 __ovld __cnfn convert_long8_sat(short8);
-long8 __ovld __cnfn convert_long8_rte(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
-long8 __ovld __cnfn convert_long8_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_rtn(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
-long8 __ovld __cnfn convert_long8(ushort8);
-long8 __ovld __cnfn convert_long8_sat(ushort8);
-long8 __ovld __cnfn convert_long8_rte(int8);
-long8 __ovld __cnfn convert_long8_sat_rte(int8);
-long8 __ovld __cnfn convert_long8_rtz(int8);
-long8 __ovld __cnfn convert_long8_sat_rtz(int8);
-long8 __ovld __cnfn convert_long8_rtp(int8);
-long8 __ovld __cnfn convert_long8_sat_rtp(int8);
-long8 __ovld __cnfn convert_long8_rtn(int8);
-long8 __ovld __cnfn convert_long8_sat_rtn(int8);
-long8 __ovld __cnfn convert_long8(int8);
-long8 __ovld __cnfn convert_long8_sat(int8);
-long8 __ovld __cnfn convert_long8_rte(uint8);
-long8 __ovld __cnfn convert_long8_sat_rte(uint8);
-long8 __ovld __cnfn convert_long8_rtz(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
-long8 __ovld __cnfn convert_long8_rtp(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
-long8 __ovld __cnfn convert_long8_rtn(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
-long8 __ovld __cnfn convert_long8(uint8);
-long8 __ovld __cnfn convert_long8_sat(uint8);
-long8 __ovld __cnfn convert_long8_rte(long8);
-long8 __ovld __cnfn convert_long8_sat_rte(long8);
-long8 __ovld __cnfn convert_long8_rtz(long8);
-long8 __ovld __cnfn convert_long8_sat_rtz(long8);
-long8 __ovld __cnfn convert_long8_rtp(long8);
-long8 __ovld __cnfn convert_long8_sat_rtp(long8);
-long8 __ovld __cnfn convert_long8_rtn(long8);
-long8 __ovld __cnfn convert_long8_sat_rtn(long8);
-long8 __ovld __cnfn convert_long8(long8);
-long8 __ovld __cnfn convert_long8_sat(long8);
-long8 __ovld __cnfn convert_long8_rte(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
-long8 __ovld __cnfn convert_long8_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_rtn(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
-long8 __ovld __cnfn convert_long8(ulong8);
-long8 __ovld __cnfn convert_long8_sat(ulong8);
-long8 __ovld __cnfn convert_long8_rte(float8);
-long8 __ovld __cnfn convert_long8_sat_rte(float8);
-long8 __ovld __cnfn convert_long8_rtz(float8);
-long8 __ovld __cnfn convert_long8_sat_rtz(float8);
-long8 __ovld __cnfn convert_long8_rtp(float8);
-long8 __ovld __cnfn convert_long8_sat_rtp(float8);
-long8 __ovld __cnfn convert_long8_rtn(float8);
-long8 __ovld __cnfn convert_long8_sat_rtn(float8);
-long8 __ovld __cnfn convert_long8(float8);
-long8 __ovld __cnfn convert_long8_sat(float8);
-ulong8 __ovld __cnfn convert_ulong8_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat(char8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat(short8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat(int8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat(long8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat(float8);
-float8 __ovld __cnfn convert_float8_rte(char8);
-float8 __ovld __cnfn convert_float8_rtz(char8);
-float8 __ovld __cnfn convert_float8_rtp(char8);
-float8 __ovld __cnfn convert_float8_rtn(char8);
-float8 __ovld __cnfn convert_float8(char8);
-float8 __ovld __cnfn convert_float8_rte(uchar8);
-float8 __ovld __cnfn convert_float8_rtz(uchar8);
-float8 __ovld __cnfn convert_float8_rtp(uchar8);
-float8 __ovld __cnfn convert_float8_rtn(uchar8);
-float8 __ovld __cnfn convert_float8(uchar8);
-float8 __ovld __cnfn convert_float8_rte(short8);
-float8 __ovld __cnfn convert_float8_rtz(short8);
-float8 __ovld __cnfn convert_float8_rtp(short8);
-float8 __ovld __cnfn convert_float8_rtn(short8);
-float8 __ovld __cnfn convert_float8(short8);
-float8 __ovld __cnfn convert_float8_rte(ushort8);
-float8 __ovld __cnfn convert_float8_rtz(ushort8);
-float8 __ovld __cnfn convert_float8_rtp(ushort8);
-float8 __ovld __cnfn convert_float8_rtn(ushort8);
-float8 __ovld __cnfn convert_float8(ushort8);
-float8 __ovld __cnfn convert_float8_rte(int8);
-float8 __ovld __cnfn convert_float8_rtz(int8);
-float8 __ovld __cnfn convert_float8_rtp(int8);
-float8 __ovld __cnfn convert_float8_rtn(int8);
-float8 __ovld __cnfn convert_float8(int8);
-float8 __ovld __cnfn convert_float8_rte(uint8);
-float8 __ovld __cnfn convert_float8_rtz(uint8);
-float8 __ovld __cnfn convert_float8_rtp(uint8);
-float8 __ovld __cnfn convert_float8_rtn(uint8);
-float8 __ovld __cnfn convert_float8(uint8);
-float8 __ovld __cnfn convert_float8_rte(long8);
-float8 __ovld __cnfn convert_float8_rtz(long8);
-float8 __ovld __cnfn convert_float8_rtp(long8);
-float8 __ovld __cnfn convert_float8_rtn(long8);
-float8 __ovld __cnfn convert_float8(long8);
-float8 __ovld __cnfn convert_float8_rte(ulong8);
-float8 __ovld __cnfn convert_float8_rtz(ulong8);
-float8 __ovld __cnfn convert_float8_rtp(ulong8);
-float8 __ovld __cnfn convert_float8_rtn(ulong8);
-float8 __ovld __cnfn convert_float8(ulong8);
-float8 __ovld __cnfn convert_float8_rte(float8);
-float8 __ovld __cnfn convert_float8_rtz(float8);
-float8 __ovld __cnfn convert_float8_rtp(float8);
-float8 __ovld __cnfn convert_float8_rtn(float8);
-float8 __ovld __cnfn convert_float8(float8);
-char16 __ovld __cnfn convert_char16_rte(char16);
-char16 __ovld __cnfn convert_char16_sat_rte(char16);
-char16 __ovld __cnfn convert_char16_rtz(char16);
-char16 __ovld __cnfn convert_char16_sat_rtz(char16);
-char16 __ovld __cnfn convert_char16_rtp(char16);
-char16 __ovld __cnfn convert_char16_sat_rtp(char16);
-char16 __ovld __cnfn convert_char16_rtn(char16);
-char16 __ovld __cnfn convert_char16_sat_rtn(char16);
-char16 __ovld __cnfn convert_char16(char16);
-char16 __ovld __cnfn convert_char16_sat(char16);
-char16 __ovld __cnfn convert_char16_rte(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
-char16 __ovld __cnfn convert_char16_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_rtn(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
-char16 __ovld __cnfn convert_char16(uchar16);
-char16 __ovld __cnfn convert_char16_sat(uchar16);
-char16 __ovld __cnfn convert_char16_rte(short16);
-char16 __ovld __cnfn convert_char16_sat_rte(short16);
-char16 __ovld __cnfn convert_char16_rtz(short16);
-char16 __ovld __cnfn convert_char16_sat_rtz(short16);
-char16 __ovld __cnfn convert_char16_rtp(short16);
-char16 __ovld __cnfn convert_char16_sat_rtp(short16);
-char16 __ovld __cnfn convert_char16_rtn(short16);
-char16 __ovld __cnfn convert_char16_sat_rtn(short16);
-char16 __ovld __cnfn convert_char16(short16);
-char16 __ovld __cnfn convert_char16_sat(short16);
-char16 __ovld __cnfn convert_char16_rte(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
-char16 __ovld __cnfn convert_char16_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_rtn(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
-char16 __ovld __cnfn convert_char16(ushort16);
-char16 __ovld __cnfn convert_char16_sat(ushort16);
-char16 __ovld __cnfn convert_char16_rte(int16);
-char16 __ovld __cnfn convert_char16_sat_rte(int16);
-char16 __ovld __cnfn convert_char16_rtz(int16);
-char16 __ovld __cnfn convert_char16_sat_rtz(int16);
-char16 __ovld __cnfn convert_char16_rtp(int16);
-char16 __ovld __cnfn convert_char16_sat_rtp(int16);
-char16 __ovld __cnfn convert_char16_rtn(int16);
-char16 __ovld __cnfn convert_char16_sat_rtn(int16);
-char16 __ovld __cnfn convert_char16(int16);
-char16 __ovld __cnfn convert_char16_sat(int16);
-char16 __ovld __cnfn convert_char16_rte(uint16);
-char16 __ovld __cnfn convert_char16_sat_rte(uint16);
-char16 __ovld __cnfn convert_char16_rtz(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
-char16 __ovld __cnfn convert_char16_rtp(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
-char16 __ovld __cnfn convert_char16_rtn(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
-char16 __ovld __cnfn convert_char16(uint16);
-char16 __ovld __cnfn convert_char16_sat(uint16);
-char16 __ovld __cnfn convert_char16_rte(long16);
-char16 __ovld __cnfn convert_char16_sat_rte(long16);
-char16 __ovld __cnfn convert_char16_rtz(long16);
-char16 __ovld __cnfn convert_char16_sat_rtz(long16);
-char16 __ovld __cnfn convert_char16_rtp(long16);
-char16 __ovld __cnfn convert_char16_sat_rtp(long16);
-char16 __ovld __cnfn convert_char16_rtn(long16);
-char16 __ovld __cnfn convert_char16_sat_rtn(long16);
-char16 __ovld __cnfn convert_char16(long16);
-char16 __ovld __cnfn convert_char16_sat(long16);
-char16 __ovld __cnfn convert_char16_rte(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
-char16 __ovld __cnfn convert_char16_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_rtn(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
-char16 __ovld __cnfn convert_char16(ulong16);
-char16 __ovld __cnfn convert_char16_sat(ulong16);
-char16 __ovld __cnfn convert_char16_rte(float16);
-char16 __ovld __cnfn convert_char16_sat_rte(float16);
-char16 __ovld __cnfn convert_char16_rtz(float16);
-char16 __ovld __cnfn convert_char16_sat_rtz(float16);
-char16 __ovld __cnfn convert_char16_rtp(float16);
-char16 __ovld __cnfn convert_char16_sat_rtp(float16);
-char16 __ovld __cnfn convert_char16_rtn(float16);
-char16 __ovld __cnfn convert_char16_sat_rtn(float16);
-char16 __ovld __cnfn convert_char16(float16);
-char16 __ovld __cnfn convert_char16_sat(float16);
-uchar16 __ovld __cnfn convert_uchar16_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat(char16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat(short16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat(int16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat(long16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat(float16);
-short16 __ovld __cnfn convert_short16_rte(char16);
-short16 __ovld __cnfn convert_short16_sat_rte(char16);
-short16 __ovld __cnfn convert_short16_rtz(char16);
-short16 __ovld __cnfn convert_short16_sat_rtz(char16);
-short16 __ovld __cnfn convert_short16_rtp(char16);
-short16 __ovld __cnfn convert_short16_sat_rtp(char16);
-short16 __ovld __cnfn convert_short16_rtn(char16);
-short16 __ovld __cnfn convert_short16_sat_rtn(char16);
-short16 __ovld __cnfn convert_short16(char16);
-short16 __ovld __cnfn convert_short16_sat(char16);
-short16 __ovld __cnfn convert_short16_rte(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
-short16 __ovld __cnfn convert_short16_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_rtn(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
-short16 __ovld __cnfn convert_short16(uchar16);
-short16 __ovld __cnfn convert_short16_sat(uchar16);
-short16 __ovld __cnfn convert_short16_rte(short16);
-short16 __ovld __cnfn convert_short16_sat_rte(short16);
-short16 __ovld __cnfn convert_short16_rtz(short16);
-short16 __ovld __cnfn convert_short16_sat_rtz(short16);
-short16 __ovld __cnfn convert_short16_rtp(short16);
-short16 __ovld __cnfn convert_short16_sat_rtp(short16);
-short16 __ovld __cnfn convert_short16_rtn(short16);
-short16 __ovld __cnfn convert_short16_sat_rtn(short16);
-short16 __ovld __cnfn convert_short16(short16);
-short16 __ovld __cnfn convert_short16_sat(short16);
-short16 __ovld __cnfn convert_short16_rte(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
-short16 __ovld __cnfn convert_short16_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_rtn(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
-short16 __ovld __cnfn convert_short16(ushort16);
-short16 __ovld __cnfn convert_short16_sat(ushort16);
-short16 __ovld __cnfn convert_short16_rte(int16);
-short16 __ovld __cnfn convert_short16_sat_rte(int16);
-short16 __ovld __cnfn convert_short16_rtz(int16);
-short16 __ovld __cnfn convert_short16_sat_rtz(int16);
-short16 __ovld __cnfn convert_short16_rtp(int16);
-short16 __ovld __cnfn convert_short16_sat_rtp(int16);
-short16 __ovld __cnfn convert_short16_rtn(int16);
-short16 __ovld __cnfn convert_short16_sat_rtn(int16);
-short16 __ovld __cnfn convert_short16(int16);
-short16 __ovld __cnfn convert_short16_sat(int16);
-short16 __ovld __cnfn convert_short16_rte(uint16);
-short16 __ovld __cnfn convert_short16_sat_rte(uint16);
-short16 __ovld __cnfn convert_short16_rtz(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
-short16 __ovld __cnfn convert_short16_rtp(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
-short16 __ovld __cnfn convert_short16_rtn(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
-short16 __ovld __cnfn convert_short16(uint16);
-short16 __ovld __cnfn convert_short16_sat(uint16);
-short16 __ovld __cnfn convert_short16_rte(long16);
-short16 __ovld __cnfn convert_short16_sat_rte(long16);
-short16 __ovld __cnfn convert_short16_rtz(long16);
-short16 __ovld __cnfn convert_short16_sat_rtz(long16);
-short16 __ovld __cnfn convert_short16_rtp(long16);
-short16 __ovld __cnfn convert_short16_sat_rtp(long16);
-short16 __ovld __cnfn convert_short16_rtn(long16);
-short16 __ovld __cnfn convert_short16_sat_rtn(long16);
-short16 __ovld __cnfn convert_short16(long16);
-short16 __ovld __cnfn convert_short16_sat(long16);
-short16 __ovld __cnfn convert_short16_rte(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
-short16 __ovld __cnfn convert_short16_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_rtn(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
-short16 __ovld __cnfn convert_short16(ulong16);
-short16 __ovld __cnfn convert_short16_sat(ulong16);
-short16 __ovld __cnfn convert_short16_rte(float16);
-short16 __ovld __cnfn convert_short16_sat_rte(float16);
-short16 __ovld __cnfn convert_short16_rtz(float16);
-short16 __ovld __cnfn convert_short16_sat_rtz(float16);
-short16 __ovld __cnfn convert_short16_rtp(float16);
-short16 __ovld __cnfn convert_short16_sat_rtp(float16);
-short16 __ovld __cnfn convert_short16_rtn(float16);
-short16 __ovld __cnfn convert_short16_sat_rtn(float16);
-short16 __ovld __cnfn convert_short16(float16);
-short16 __ovld __cnfn convert_short16_sat(float16);
-ushort16 __ovld __cnfn convert_ushort16_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat(char16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat(short16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat(int16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat(long16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat(float16);
-int16 __ovld __cnfn convert_int16_rte(char16);
-int16 __ovld __cnfn convert_int16_sat_rte(char16);
-int16 __ovld __cnfn convert_int16_rtz(char16);
-int16 __ovld __cnfn convert_int16_sat_rtz(char16);
-int16 __ovld __cnfn convert_int16_rtp(char16);
-int16 __ovld __cnfn convert_int16_sat_rtp(char16);
-int16 __ovld __cnfn convert_int16_rtn(char16);
-int16 __ovld __cnfn convert_int16_sat_rtn(char16);
-int16 __ovld __cnfn convert_int16(char16);
-int16 __ovld __cnfn convert_int16_sat(char16);
-int16 __ovld __cnfn convert_int16_rte(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
-int16 __ovld __cnfn convert_int16_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_rtn(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
-int16 __ovld __cnfn convert_int16(uchar16);
-int16 __ovld __cnfn convert_int16_sat(uchar16);
-int16 __ovld __cnfn convert_int16_rte(short16);
-int16 __ovld __cnfn convert_int16_sat_rte(short16);
-int16 __ovld __cnfn convert_int16_rtz(short16);
-int16 __ovld __cnfn convert_int16_sat_rtz(short16);
-int16 __ovld __cnfn convert_int16_rtp(short16);
-int16 __ovld __cnfn convert_int16_sat_rtp(short16);
-int16 __ovld __cnfn convert_int16_rtn(short16);
-int16 __ovld __cnfn convert_int16_sat_rtn(short16);
-int16 __ovld __cnfn convert_int16(short16);
-int16 __ovld __cnfn convert_int16_sat(short16);
-int16 __ovld __cnfn convert_int16_rte(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
-int16 __ovld __cnfn convert_int16_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_rtn(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
-int16 __ovld __cnfn convert_int16(ushort16);
-int16 __ovld __cnfn convert_int16_sat(ushort16);
-int16 __ovld __cnfn convert_int16_rte(int16);
-int16 __ovld __cnfn convert_int16_sat_rte(int16);
-int16 __ovld __cnfn convert_int16_rtz(int16);
-int16 __ovld __cnfn convert_int16_sat_rtz(int16);
-int16 __ovld __cnfn convert_int16_rtp(int16);
-int16 __ovld __cnfn convert_int16_sat_rtp(int16);
-int16 __ovld __cnfn convert_int16_rtn(int16);
-int16 __ovld __cnfn convert_int16_sat_rtn(int16);
-int16 __ovld __cnfn convert_int16(int16);
-int16 __ovld __cnfn convert_int16_sat(int16);
-int16 __ovld __cnfn convert_int16_rte(uint16);
-int16 __ovld __cnfn convert_int16_sat_rte(uint16);
-int16 __ovld __cnfn convert_int16_rtz(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
-int16 __ovld __cnfn convert_int16_rtp(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
-int16 __ovld __cnfn convert_int16_rtn(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
-int16 __ovld __cnfn convert_int16(uint16);
-int16 __ovld __cnfn convert_int16_sat(uint16);
-int16 __ovld __cnfn convert_int16_rte(long16);
-int16 __ovld __cnfn convert_int16_sat_rte(long16);
-int16 __ovld __cnfn convert_int16_rtz(long16);
-int16 __ovld __cnfn convert_int16_sat_rtz(long16);
-int16 __ovld __cnfn convert_int16_rtp(long16);
-int16 __ovld __cnfn convert_int16_sat_rtp(long16);
-int16 __ovld __cnfn convert_int16_rtn(long16);
-int16 __ovld __cnfn convert_int16_sat_rtn(long16);
-int16 __ovld __cnfn convert_int16(long16);
-int16 __ovld __cnfn convert_int16_sat(long16);
-int16 __ovld __cnfn convert_int16_rte(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
-int16 __ovld __cnfn convert_int16_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_rtn(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
-int16 __ovld __cnfn convert_int16(ulong16);
-int16 __ovld __cnfn convert_int16_sat(ulong16);
-int16 __ovld __cnfn convert_int16_rte(float16);
-int16 __ovld __cnfn convert_int16_sat_rte(float16);
-int16 __ovld __cnfn convert_int16_rtz(float16);
-int16 __ovld __cnfn convert_int16_sat_rtz(float16);
-int16 __ovld __cnfn convert_int16_rtp(float16);
-int16 __ovld __cnfn convert_int16_sat_rtp(float16);
-int16 __ovld __cnfn convert_int16_rtn(float16);
-int16 __ovld __cnfn convert_int16_sat_rtn(float16);
-int16 __ovld __cnfn convert_int16(float16);
-int16 __ovld __cnfn convert_int16_sat(float16);
-uint16 __ovld __cnfn convert_uint16_rte(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
-uint16 __ovld __cnfn convert_uint16_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_rtn(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
-uint16 __ovld __cnfn convert_uint16(char16);
-uint16 __ovld __cnfn convert_uint16_sat(char16);
-uint16 __ovld __cnfn convert_uint16_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat(uchar16);
-uint16 __ovld __cnfn convert_uint16_rte(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
-uint16 __ovld __cnfn convert_uint16_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_rtn(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
-uint16 __ovld __cnfn convert_uint16(short16);
-uint16 __ovld __cnfn convert_uint16_sat(short16);
-uint16 __ovld __cnfn convert_uint16_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat(ushort16);
-uint16 __ovld __cnfn convert_uint16_rte(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
-uint16 __ovld __cnfn convert_uint16_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_rtn(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
-uint16 __ovld __cnfn convert_uint16(int16);
-uint16 __ovld __cnfn convert_uint16_sat(int16);
-uint16 __ovld __cnfn convert_uint16_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16(uint16);
-uint16 __ovld __cnfn convert_uint16_sat(uint16);
-uint16 __ovld __cnfn convert_uint16_rte(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
-uint16 __ovld __cnfn convert_uint16_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_rtn(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
-uint16 __ovld __cnfn convert_uint16(long16);
-uint16 __ovld __cnfn convert_uint16_sat(long16);
-uint16 __ovld __cnfn convert_uint16_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat(ulong16);
-uint16 __ovld __cnfn convert_uint16_rte(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
-uint16 __ovld __cnfn convert_uint16_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_rtn(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
-uint16 __ovld __cnfn convert_uint16(float16);
-uint16 __ovld __cnfn convert_uint16_sat(float16);
-long16 __ovld __cnfn convert_long16_rte(char16);
-long16 __ovld __cnfn convert_long16_sat_rte(char16);
-long16 __ovld __cnfn convert_long16_rtz(char16);
-long16 __ovld __cnfn convert_long16_sat_rtz(char16);
-long16 __ovld __cnfn convert_long16_rtp(char16);
-long16 __ovld __cnfn convert_long16_sat_rtp(char16);
-long16 __ovld __cnfn convert_long16_rtn(char16);
-long16 __ovld __cnfn convert_long16_sat_rtn(char16);
-long16 __ovld __cnfn convert_long16(char16);
-long16 __ovld __cnfn convert_long16_sat(char16);
-long16 __ovld __cnfn convert_long16_rte(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
-long16 __ovld __cnfn convert_long16_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_rtn(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
-long16 __ovld __cnfn convert_long16(uchar16);
-long16 __ovld __cnfn convert_long16_sat(uchar16);
-long16 __ovld __cnfn convert_long16_rte(short16);
-long16 __ovld __cnfn convert_long16_sat_rte(short16);
-long16 __ovld __cnfn convert_long16_rtz(short16);
-long16 __ovld __cnfn convert_long16_sat_rtz(short16);
-long16 __ovld __cnfn convert_long16_rtp(short16);
-long16 __ovld __cnfn convert_long16_sat_rtp(short16);
-long16 __ovld __cnfn convert_long16_rtn(short16);
-long16 __ovld __cnfn convert_long16_sat_rtn(short16);
-long16 __ovld __cnfn convert_long16(short16);
-long16 __ovld __cnfn convert_long16_sat(short16);
-long16 __ovld __cnfn convert_long16_rte(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
-long16 __ovld __cnfn convert_long16_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_rtn(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
-long16 __ovld __cnfn convert_long16(ushort16);
-long16 __ovld __cnfn convert_long16_sat(ushort16);
-long16 __ovld __cnfn convert_long16_rte(int16);
-long16 __ovld __cnfn convert_long16_sat_rte(int16);
-long16 __ovld __cnfn convert_long16_rtz(int16);
-long16 __ovld __cnfn convert_long16_sat_rtz(int16);
-long16 __ovld __cnfn convert_long16_rtp(int16);
-long16 __ovld __cnfn convert_long16_sat_rtp(int16);
-long16 __ovld __cnfn convert_long16_rtn(int16);
-long16 __ovld __cnfn convert_long16_sat_rtn(int16);
-long16 __ovld __cnfn convert_long16(int16);
-long16 __ovld __cnfn convert_long16_sat(int16);
-long16 __ovld __cnfn convert_long16_rte(uint16);
-long16 __ovld __cnfn convert_long16_sat_rte(uint16);
-long16 __ovld __cnfn convert_long16_rtz(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
-long16 __ovld __cnfn convert_long16_rtp(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
-long16 __ovld __cnfn convert_long16_rtn(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
-long16 __ovld __cnfn convert_long16(uint16);
-long16 __ovld __cnfn convert_long16_sat(uint16);
-long16 __ovld __cnfn convert_long16_rte(long16);
-long16 __ovld __cnfn convert_long16_sat_rte(long16);
-long16 __ovld __cnfn convert_long16_rtz(long16);
-long16 __ovld __cnfn convert_long16_sat_rtz(long16);
-long16 __ovld __cnfn convert_long16_rtp(long16);
-long16 __ovld __cnfn convert_long16_sat_rtp(long16);
-long16 __ovld __cnfn convert_long16_rtn(long16);
-long16 __ovld __cnfn convert_long16_sat_rtn(long16);
-long16 __ovld __cnfn convert_long16(long16);
-long16 __ovld __cnfn convert_long16_sat(long16);
-long16 __ovld __cnfn convert_long16_rte(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
-long16 __ovld __cnfn convert_long16_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_rtn(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
-long16 __ovld __cnfn convert_long16(ulong16);
-long16 __ovld __cnfn convert_long16_sat(ulong16);
-long16 __ovld __cnfn convert_long16_rte(float16);
-long16 __ovld __cnfn convert_long16_sat_rte(float16);
-long16 __ovld __cnfn convert_long16_rtz(float16);
-long16 __ovld __cnfn convert_long16_sat_rtz(float16);
-long16 __ovld __cnfn convert_long16_rtp(float16);
-long16 __ovld __cnfn convert_long16_sat_rtp(float16);
-long16 __ovld __cnfn convert_long16_rtn(float16);
-long16 __ovld __cnfn convert_long16_sat_rtn(float16);
-long16 __ovld __cnfn convert_long16(float16);
-long16 __ovld __cnfn convert_long16_sat(float16);
-ulong16 __ovld __cnfn convert_ulong16_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat(char16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat(short16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat(int16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat(long16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat(float16);
-float16 __ovld __cnfn convert_float16_rte(char16);
-float16 __ovld __cnfn convert_float16_rtz(char16);
-float16 __ovld __cnfn convert_float16_rtp(char16);
-float16 __ovld __cnfn convert_float16_rtn(char16);
-float16 __ovld __cnfn convert_float16(char16);
-float16 __ovld __cnfn convert_float16_rte(uchar16);
-float16 __ovld __cnfn convert_float16_rtz(uchar16);
-float16 __ovld __cnfn convert_float16_rtp(uchar16);
-float16 __ovld __cnfn convert_float16_rtn(uchar16);
-float16 __ovld __cnfn convert_float16(uchar16);
-float16 __ovld __cnfn convert_float16_rte(short16);
-float16 __ovld __cnfn convert_float16_rtz(short16);
-float16 __ovld __cnfn convert_float16_rtp(short16);
-float16 __ovld __cnfn convert_float16_rtn(short16);
-float16 __ovld __cnfn convert_float16(short16);
-float16 __ovld __cnfn convert_float16_rte(ushort16);
-float16 __ovld __cnfn convert_float16_rtz(ushort16);
-float16 __ovld __cnfn convert_float16_rtp(ushort16);
-float16 __ovld __cnfn convert_float16_rtn(ushort16);
-float16 __ovld __cnfn convert_float16(ushort16);
-float16 __ovld __cnfn convert_float16_rte(int16);
-float16 __ovld __cnfn convert_float16_rtz(int16);
-float16 __ovld __cnfn convert_float16_rtp(int16);
-float16 __ovld __cnfn convert_float16_rtn(int16);
-float16 __ovld __cnfn convert_float16(int16);
-float16 __ovld __cnfn convert_float16_rte(uint16);
-float16 __ovld __cnfn convert_float16_rtz(uint16);
-float16 __ovld __cnfn convert_float16_rtp(uint16);
-float16 __ovld __cnfn convert_float16_rtn(uint16);
-float16 __ovld __cnfn convert_float16(uint16);
-float16 __ovld __cnfn convert_float16_rte(long16);
-float16 __ovld __cnfn convert_float16_rtz(long16);
-float16 __ovld __cnfn convert_float16_rtp(long16);
-float16 __ovld __cnfn convert_float16_rtn(long16);
-float16 __ovld __cnfn convert_float16(long16);
-float16 __ovld __cnfn convert_float16_rte(ulong16);
-float16 __ovld __cnfn convert_float16_rtz(ulong16);
-float16 __ovld __cnfn convert_float16_rtp(ulong16);
-float16 __ovld __cnfn convert_float16_rtn(ulong16);
-float16 __ovld __cnfn convert_float16(ulong16);
-float16 __ovld __cnfn convert_float16_rte(float16);
-float16 __ovld __cnfn convert_float16_rtz(float16);
-float16 __ovld __cnfn convert_float16_rtp(float16);
-float16 __ovld __cnfn convert_float16_rtn(float16);
-float16 __ovld __cnfn convert_float16(float16);
-
-// Conversions with double data type parameters or return value.
-
-#ifdef cl_khr_fp64
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-char __ovld __cnfn convert_char(double);
-char __ovld __cnfn convert_char_rte(double);
-char __ovld __cnfn convert_char_rtn(double);
-char __ovld __cnfn convert_char_rtp(double);
-char __ovld __cnfn convert_char_rtz(double);
-char __ovld __cnfn convert_char_sat(double);
-char __ovld __cnfn convert_char_sat_rte(double);
-char __ovld __cnfn convert_char_sat_rtn(double);
-char __ovld __cnfn convert_char_sat_rtp(double);
-char __ovld __cnfn convert_char_sat_rtz(double);
-char2 __ovld __cnfn convert_char2(double2);
-char2 __ovld __cnfn convert_char2_rte(double2);
-char2 __ovld __cnfn convert_char2_rtn(double2);
-char2 __ovld __cnfn convert_char2_rtp(double2);
-char2 __ovld __cnfn convert_char2_rtz(double2);
-char2 __ovld __cnfn convert_char2_sat(double2);
-char2 __ovld __cnfn convert_char2_sat_rte(double2);
-char2 __ovld __cnfn convert_char2_sat_rtn(double2);
-char2 __ovld __cnfn convert_char2_sat_rtp(double2);
-char2 __ovld __cnfn convert_char2_sat_rtz(double2);
-char3 __ovld __cnfn convert_char3(double3);
-char3 __ovld __cnfn convert_char3_rte(double3);
-char3 __ovld __cnfn convert_char3_rtn(double3);
-char3 __ovld __cnfn convert_char3_rtp(double3);
-char3 __ovld __cnfn convert_char3_rtz(double3);
-char3 __ovld __cnfn convert_char3_sat(double3);
-char3 __ovld __cnfn convert_char3_sat_rte(double3);
-char3 __ovld __cnfn convert_char3_sat_rtn(double3);
-char3 __ovld __cnfn convert_char3_sat_rtp(double3);
-char3 __ovld __cnfn convert_char3_sat_rtz(double3);
-char4 __ovld __cnfn convert_char4(double4);
-char4 __ovld __cnfn convert_char4_rte(double4);
-char4 __ovld __cnfn convert_char4_rtn(double4);
-char4 __ovld __cnfn convert_char4_rtp(double4);
-char4 __ovld __cnfn convert_char4_rtz(double4);
-char4 __ovld __cnfn convert_char4_sat(double4);
-char4 __ovld __cnfn convert_char4_sat_rte(double4);
-char4 __ovld __cnfn convert_char4_sat_rtn(double4);
-char4 __ovld __cnfn convert_char4_sat_rtp(double4);
-char4 __ovld __cnfn convert_char4_sat_rtz(double4);
-char8 __ovld __cnfn convert_char8(double8);
-char8 __ovld __cnfn convert_char8_rte(double8);
-char8 __ovld __cnfn convert_char8_rtn(double8);
-char8 __ovld __cnfn convert_char8_rtp(double8);
-char8 __ovld __cnfn convert_char8_rtz(double8);
-char8 __ovld __cnfn convert_char8_sat(double8);
-char8 __ovld __cnfn convert_char8_sat_rte(double8);
-char8 __ovld __cnfn convert_char8_sat_rtn(double8);
-char8 __ovld __cnfn convert_char8_sat_rtp(double8);
-char8 __ovld __cnfn convert_char8_sat_rtz(double8);
-char16 __ovld __cnfn convert_char16(double16);
-char16 __ovld __cnfn convert_char16_rte(double16);
-char16 __ovld __cnfn convert_char16_rtn(double16);
-char16 __ovld __cnfn convert_char16_rtp(double16);
-char16 __ovld __cnfn convert_char16_rtz(double16);
-char16 __ovld __cnfn convert_char16_sat(double16);
-char16 __ovld __cnfn convert_char16_sat_rte(double16);
-char16 __ovld __cnfn convert_char16_sat_rtn(double16);
-char16 __ovld __cnfn convert_char16_sat_rtp(double16);
-char16 __ovld __cnfn convert_char16_sat_rtz(double16);
-
-uchar __ovld __cnfn convert_uchar(double);
-uchar __ovld __cnfn convert_uchar_rte(double);
-uchar __ovld __cnfn convert_uchar_rtn(double);
-uchar __ovld __cnfn convert_uchar_rtp(double);
-uchar __ovld __cnfn convert_uchar_rtz(double);
-uchar __ovld __cnfn convert_uchar_sat(double);
-uchar __ovld __cnfn convert_uchar_sat_rte(double);
-uchar __ovld __cnfn convert_uchar_sat_rtn(double);
-uchar __ovld __cnfn convert_uchar_sat_rtp(double);
-uchar __ovld __cnfn convert_uchar_sat_rtz(double);
-uchar2 __ovld __cnfn convert_uchar2(double2);
-uchar2 __ovld __cnfn convert_uchar2_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
-uchar3 __ovld __cnfn convert_uchar3(double3);
-uchar3 __ovld __cnfn convert_uchar3_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
-uchar4 __ovld __cnfn convert_uchar4(double4);
-uchar4 __ovld __cnfn convert_uchar4_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
-uchar8 __ovld __cnfn convert_uchar8(double8);
-uchar8 __ovld __cnfn convert_uchar8_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
-uchar16 __ovld __cnfn convert_uchar16(double16);
-uchar16 __ovld __cnfn convert_uchar16_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
-
-short __ovld __cnfn convert_short(double);
-short __ovld __cnfn convert_short_rte(double);
-short __ovld __cnfn convert_short_rtn(double);
-short __ovld __cnfn convert_short_rtp(double);
-short __ovld __cnfn convert_short_rtz(double);
-short __ovld __cnfn convert_short_sat(double);
-short __ovld __cnfn convert_short_sat_rte(double);
-short __ovld __cnfn convert_short_sat_rtn(double);
-short __ovld __cnfn convert_short_sat_rtp(double);
-short __ovld __cnfn convert_short_sat_rtz(double);
-short2 __ovld __cnfn convert_short2(double2);
-short2 __ovld __cnfn convert_short2_rte(double2);
-short2 __ovld __cnfn convert_short2_rtn(double2);
-short2 __ovld __cnfn convert_short2_rtp(double2);
-short2 __ovld __cnfn convert_short2_rtz(double2);
-short2 __ovld __cnfn convert_short2_sat(double2);
-short2 __ovld __cnfn convert_short2_sat_rte(double2);
-short2 __ovld __cnfn convert_short2_sat_rtn(double2);
-short2 __ovld __cnfn convert_short2_sat_rtp(double2);
-short2 __ovld __cnfn convert_short2_sat_rtz(double2);
-short3 __ovld __cnfn convert_short3(double3);
-short3 __ovld __cnfn convert_short3_rte(double3);
-short3 __ovld __cnfn convert_short3_rtn(double3);
-short3 __ovld __cnfn convert_short3_rtp(double3);
-short3 __ovld __cnfn convert_short3_rtz(double3);
-short3 __ovld __cnfn convert_short3_sat(double3);
-short3 __ovld __cnfn convert_short3_sat_rte(double3);
-short3 __ovld __cnfn convert_short3_sat_rtn(double3);
-short3 __ovld __cnfn convert_short3_sat_rtp(double3);
-short3 __ovld __cnfn convert_short3_sat_rtz(double3);
-short4 __ovld __cnfn convert_short4(double4);
-short4 __ovld __cnfn convert_short4_rte(double4);
-short4 __ovld __cnfn convert_short4_rtn(double4);
-short4 __ovld __cnfn convert_short4_rtp(double4);
-short4 __ovld __cnfn convert_short4_rtz(double4);
-short4 __ovld __cnfn convert_short4_sat(double4);
-short4 __ovld __cnfn convert_short4_sat_rte(double4);
-short4 __ovld __cnfn convert_short4_sat_rtn(double4);
-short4 __ovld __cnfn convert_short4_sat_rtp(double4);
-short4 __ovld __cnfn convert_short4_sat_rtz(double4);
-short8 __ovld __cnfn convert_short8(double8);
-short8 __ovld __cnfn convert_short8_rte(double8);
-short8 __ovld __cnfn convert_short8_rtn(double8);
-short8 __ovld __cnfn convert_short8_rtp(double8);
-short8 __ovld __cnfn convert_short8_rtz(double8);
-short8 __ovld __cnfn convert_short8_sat(double8);
-short8 __ovld __cnfn convert_short8_sat_rte(double8);
-short8 __ovld __cnfn convert_short8_sat_rtn(double8);
-short8 __ovld __cnfn convert_short8_sat_rtp(double8);
-short8 __ovld __cnfn convert_short8_sat_rtz(double8);
-short16 __ovld __cnfn convert_short16(double16);
-short16 __ovld __cnfn convert_short16_rte(double16);
-short16 __ovld __cnfn convert_short16_rtn(double16);
-short16 __ovld __cnfn convert_short16_rtp(double16);
-short16 __ovld __cnfn convert_short16_rtz(double16);
-short16 __ovld __cnfn convert_short16_sat(double16);
-short16 __ovld __cnfn convert_short16_sat_rte(double16);
-short16 __ovld __cnfn convert_short16_sat_rtn(double16);
-short16 __ovld __cnfn convert_short16_sat_rtp(double16);
-short16 __ovld __cnfn convert_short16_sat_rtz(double16);
-
-ushort __ovld __cnfn convert_ushort(double);
-ushort __ovld __cnfn convert_ushort_rte(double);
-ushort __ovld __cnfn convert_ushort_rtn(double);
-ushort __ovld __cnfn convert_ushort_rtp(double);
-ushort __ovld __cnfn convert_ushort_rtz(double);
-ushort __ovld __cnfn convert_ushort_sat(double);
-ushort __ovld __cnfn convert_ushort_sat_rte(double);
-ushort __ovld __cnfn convert_ushort_sat_rtn(double);
-ushort __ovld __cnfn convert_ushort_sat_rtp(double);
-ushort __ovld __cnfn convert_ushort_sat_rtz(double);
-ushort2 __ovld __cnfn convert_ushort2(double2);
-ushort2 __ovld __cnfn convert_ushort2_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
-ushort3 __ovld __cnfn convert_ushort3(double3);
-ushort3 __ovld __cnfn convert_ushort3_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
-ushort4 __ovld __cnfn convert_ushort4(double4);
-ushort4 __ovld __cnfn convert_ushort4_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
-ushort8 __ovld __cnfn convert_ushort8(double8);
-ushort8 __ovld __cnfn convert_ushort8_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
-ushort16 __ovld __cnfn convert_ushort16(double16);
-ushort16 __ovld __cnfn convert_ushort16_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
-
-int __ovld __cnfn convert_int(double);
-int __ovld __cnfn convert_int_rte(double);
-int __ovld __cnfn convert_int_rtn(double);
-int __ovld __cnfn convert_int_rtp(double);
-int __ovld __cnfn convert_int_rtz(double);
-int __ovld __cnfn convert_int_sat(double);
-int __ovld __cnfn convert_int_sat_rte(double);
-int __ovld __cnfn convert_int_sat_rtn(double);
-int __ovld __cnfn convert_int_sat_rtp(double);
-int __ovld __cnfn convert_int_sat_rtz(double);
-int2 __ovld __cnfn convert_int2(double2);
-int2 __ovld __cnfn convert_int2_rte(double2);
-int2 __ovld __cnfn convert_int2_rtn(double2);
-int2 __ovld __cnfn convert_int2_rtp(double2);
-int2 __ovld __cnfn convert_int2_rtz(double2);
-int2 __ovld __cnfn convert_int2_sat(double2);
-int2 __ovld __cnfn convert_int2_sat_rte(double2);
-int2 __ovld __cnfn convert_int2_sat_rtn(double2);
-int2 __ovld __cnfn convert_int2_sat_rtp(double2);
-int2 __ovld __cnfn convert_int2_sat_rtz(double2);
-int3 __ovld __cnfn convert_int3(double3);
-int3 __ovld __cnfn convert_int3_rte(double3);
-int3 __ovld __cnfn convert_int3_rtn(double3);
-int3 __ovld __cnfn convert_int3_rtp(double3);
-int3 __ovld __cnfn convert_int3_rtz(double3);
-int3 __ovld __cnfn convert_int3_sat(double3);
-int3 __ovld __cnfn convert_int3_sat_rte(double3);
-int3 __ovld __cnfn convert_int3_sat_rtn(double3);
-int3 __ovld __cnfn convert_int3_sat_rtp(double3);
-int3 __ovld __cnfn convert_int3_sat_rtz(double3);
-int4 __ovld __cnfn convert_int4(double4);
-int4 __ovld __cnfn convert_int4_rte(double4);
-int4 __ovld __cnfn convert_int4_rtn(double4);
-int4 __ovld __cnfn convert_int4_rtp(double4);
-int4 __ovld __cnfn convert_int4_rtz(double4);
-int4 __ovld __cnfn convert_int4_sat(double4);
-int4 __ovld __cnfn convert_int4_sat_rte(double4);
-int4 __ovld __cnfn convert_int4_sat_rtn(double4);
-int4 __ovld __cnfn convert_int4_sat_rtp(double4);
-int4 __ovld __cnfn convert_int4_sat_rtz(double4);
-int8 __ovld __cnfn convert_int8(double8);
-int8 __ovld __cnfn convert_int8_rte(double8);
-int8 __ovld __cnfn convert_int8_rtn(double8);
-int8 __ovld __cnfn convert_int8_rtp(double8);
-int8 __ovld __cnfn convert_int8_rtz(double8);
-int8 __ovld __cnfn convert_int8_sat(double8);
-int8 __ovld __cnfn convert_int8_sat_rte(double8);
-int8 __ovld __cnfn convert_int8_sat_rtn(double8);
-int8 __ovld __cnfn convert_int8_sat_rtp(double8);
-int8 __ovld __cnfn convert_int8_sat_rtz(double8);
-int16 __ovld __cnfn convert_int16(double16);
-int16 __ovld __cnfn convert_int16_rte(double16);
-int16 __ovld __cnfn convert_int16_rtn(double16);
-int16 __ovld __cnfn convert_int16_rtp(double16);
-int16 __ovld __cnfn convert_int16_rtz(double16);
-int16 __ovld __cnfn convert_int16_sat(double16);
-int16 __ovld __cnfn convert_int16_sat_rte(double16);
-int16 __ovld __cnfn convert_int16_sat_rtn(double16);
-int16 __ovld __cnfn convert_int16_sat_rtp(double16);
-int16 __ovld __cnfn convert_int16_sat_rtz(double16);
-
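The rounding suffix selects the IEEE-754 rounding applied by the conversion itself; with no suffix, conversions to integer types default to round-toward-zero. A small sketch, with the input value chosen to show the difference:

    double d = 2.5;
    int a = convert_int_rte(d); // 2, round to nearest even
    int b = convert_int_rtp(d); // 3, round toward +infinity
    int c = convert_int_rtz(d); // 2, round toward zero
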
-uint __ovld __cnfn convert_uint(double);
-uint __ovld __cnfn convert_uint_rte(double);
-uint __ovld __cnfn convert_uint_rtn(double);
-uint __ovld __cnfn convert_uint_rtp(double);
-uint __ovld __cnfn convert_uint_rtz(double);
-uint __ovld __cnfn convert_uint_sat(double);
-uint __ovld __cnfn convert_uint_sat_rte(double);
-uint __ovld __cnfn convert_uint_sat_rtn(double);
-uint __ovld __cnfn convert_uint_sat_rtp(double);
-uint __ovld __cnfn convert_uint_sat_rtz(double);
-uint2 __ovld __cnfn convert_uint2(double2);
-uint2 __ovld __cnfn convert_uint2_rte(double2);
-uint2 __ovld __cnfn convert_uint2_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_rtz(double2);
-uint2 __ovld __cnfn convert_uint2_sat(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
-uint3 __ovld __cnfn convert_uint3(double3);
-uint3 __ovld __cnfn convert_uint3_rte(double3);
-uint3 __ovld __cnfn convert_uint3_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_rtz(double3);
-uint3 __ovld __cnfn convert_uint3_sat(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
-uint4 __ovld __cnfn convert_uint4(double4);
-uint4 __ovld __cnfn convert_uint4_rte(double4);
-uint4 __ovld __cnfn convert_uint4_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_rtz(double4);
-uint4 __ovld __cnfn convert_uint4_sat(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
-uint8 __ovld __cnfn convert_uint8(double8);
-uint8 __ovld __cnfn convert_uint8_rte(double8);
-uint8 __ovld __cnfn convert_uint8_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_rtz(double8);
-uint8 __ovld __cnfn convert_uint8_sat(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
-uint16 __ovld __cnfn convert_uint16(double16);
-uint16 __ovld __cnfn convert_uint16_rte(double16);
-uint16 __ovld __cnfn convert_uint16_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_rtz(double16);
-uint16 __ovld __cnfn convert_uint16_sat(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
-
-long __ovld __cnfn convert_long(double);
-long __ovld __cnfn convert_long_rte(double);
-long __ovld __cnfn convert_long_rtn(double);
-long __ovld __cnfn convert_long_rtp(double);
-long __ovld __cnfn convert_long_rtz(double);
-long __ovld __cnfn convert_long_sat(double);
-long __ovld __cnfn convert_long_sat_rte(double);
-long __ovld __cnfn convert_long_sat_rtn(double);
-long __ovld __cnfn convert_long_sat_rtp(double);
-long __ovld __cnfn convert_long_sat_rtz(double);
-long2 __ovld __cnfn convert_long2(double2);
-long2 __ovld __cnfn convert_long2_rte(double2);
-long2 __ovld __cnfn convert_long2_rtn(double2);
-long2 __ovld __cnfn convert_long2_rtp(double2);
-long2 __ovld __cnfn convert_long2_rtz(double2);
-long2 __ovld __cnfn convert_long2_sat(double2);
-long2 __ovld __cnfn convert_long2_sat_rte(double2);
-long2 __ovld __cnfn convert_long2_sat_rtn(double2);
-long2 __ovld __cnfn convert_long2_sat_rtp(double2);
-long2 __ovld __cnfn convert_long2_sat_rtz(double2);
-long3 __ovld __cnfn convert_long3(double3);
-long3 __ovld __cnfn convert_long3_rte(double3);
-long3 __ovld __cnfn convert_long3_rtn(double3);
-long3 __ovld __cnfn convert_long3_rtp(double3);
-long3 __ovld __cnfn convert_long3_rtz(double3);
-long3 __ovld __cnfn convert_long3_sat(double3);
-long3 __ovld __cnfn convert_long3_sat_rte(double3);
-long3 __ovld __cnfn convert_long3_sat_rtn(double3);
-long3 __ovld __cnfn convert_long3_sat_rtp(double3);
-long3 __ovld __cnfn convert_long3_sat_rtz(double3);
-long4 __ovld __cnfn convert_long4(double4);
-long4 __ovld __cnfn convert_long4_rte(double4);
-long4 __ovld __cnfn convert_long4_rtn(double4);
-long4 __ovld __cnfn convert_long4_rtp(double4);
-long4 __ovld __cnfn convert_long4_rtz(double4);
-long4 __ovld __cnfn convert_long4_sat(double4);
-long4 __ovld __cnfn convert_long4_sat_rte(double4);
-long4 __ovld __cnfn convert_long4_sat_rtn(double4);
-long4 __ovld __cnfn convert_long4_sat_rtp(double4);
-long4 __ovld __cnfn convert_long4_sat_rtz(double4);
-long8 __ovld __cnfn convert_long8(double8);
-long8 __ovld __cnfn convert_long8_rte(double8);
-long8 __ovld __cnfn convert_long8_rtn(double8);
-long8 __ovld __cnfn convert_long8_rtp(double8);
-long8 __ovld __cnfn convert_long8_rtz(double8);
-long8 __ovld __cnfn convert_long8_sat(double8);
-long8 __ovld __cnfn convert_long8_sat_rte(double8);
-long8 __ovld __cnfn convert_long8_sat_rtn(double8);
-long8 __ovld __cnfn convert_long8_sat_rtp(double8);
-long8 __ovld __cnfn convert_long8_sat_rtz(double8);
-long16 __ovld __cnfn convert_long16(double16);
-long16 __ovld __cnfn convert_long16_rte(double16);
-long16 __ovld __cnfn convert_long16_rtn(double16);
-long16 __ovld __cnfn convert_long16_rtp(double16);
-long16 __ovld __cnfn convert_long16_rtz(double16);
-long16 __ovld __cnfn convert_long16_sat(double16);
-long16 __ovld __cnfn convert_long16_sat_rte(double16);
-long16 __ovld __cnfn convert_long16_sat_rtn(double16);
-long16 __ovld __cnfn convert_long16_sat_rtp(double16);
-long16 __ovld __cnfn convert_long16_sat_rtz(double16);
-
-ulong __ovld __cnfn convert_ulong(double);
-ulong __ovld __cnfn convert_ulong_rte(double);
-ulong __ovld __cnfn convert_ulong_rtn(double);
-ulong __ovld __cnfn convert_ulong_rtp(double);
-ulong __ovld __cnfn convert_ulong_rtz(double);
-ulong __ovld __cnfn convert_ulong_sat(double);
-ulong __ovld __cnfn convert_ulong_sat_rte(double);
-ulong __ovld __cnfn convert_ulong_sat_rtn(double);
-ulong __ovld __cnfn convert_ulong_sat_rtp(double);
-ulong __ovld __cnfn convert_ulong_sat_rtz(double);
-ulong2 __ovld __cnfn convert_ulong2(double2);
-ulong2 __ovld __cnfn convert_ulong2_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
-ulong3 __ovld __cnfn convert_ulong3(double3);
-ulong3 __ovld __cnfn convert_ulong3_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
-ulong4 __ovld __cnfn convert_ulong4(double4);
-ulong4 __ovld __cnfn convert_ulong4_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
-ulong8 __ovld __cnfn convert_ulong8(double8);
-ulong8 __ovld __cnfn convert_ulong8_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
-ulong16 __ovld __cnfn convert_ulong16(double16);
-ulong16 __ovld __cnfn convert_ulong16_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
-
-float __ovld __cnfn convert_float(double);
-float __ovld __cnfn convert_float_rte(double);
-float __ovld __cnfn convert_float_rtn(double);
-float __ovld __cnfn convert_float_rtp(double);
-float __ovld __cnfn convert_float_rtz(double);
-float2 __ovld __cnfn convert_float2(double2);
-float2 __ovld __cnfn convert_float2_rte(double2);
-float2 __ovld __cnfn convert_float2_rtn(double2);
-float2 __ovld __cnfn convert_float2_rtp(double2);
-float2 __ovld __cnfn convert_float2_rtz(double2);
-float3 __ovld __cnfn convert_float3(double3);
-float3 __ovld __cnfn convert_float3_rte(double3);
-float3 __ovld __cnfn convert_float3_rtn(double3);
-float3 __ovld __cnfn convert_float3_rtp(double3);
-float3 __ovld __cnfn convert_float3_rtz(double3);
-float4 __ovld __cnfn convert_float4(double4);
-float4 __ovld __cnfn convert_float4_rte(double4);
-float4 __ovld __cnfn convert_float4_rtn(double4);
-float4 __ovld __cnfn convert_float4_rtp(double4);
-float4 __ovld __cnfn convert_float4_rtz(double4);
-float8 __ovld __cnfn convert_float8(double8);
-float8 __ovld __cnfn convert_float8_rte(double8);
-float8 __ovld __cnfn convert_float8_rtn(double8);
-float8 __ovld __cnfn convert_float8_rtp(double8);
-float8 __ovld __cnfn convert_float8_rtz(double8);
-float16 __ovld __cnfn convert_float16(double16);
-float16 __ovld __cnfn convert_float16_rte(double16);
-float16 __ovld __cnfn convert_float16_rtn(double16);
-float16 __ovld __cnfn convert_float16_rtp(double16);
-float16 __ovld __cnfn convert_float16_rtz(double16);
-
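Note that the double-to-float conversions above carry rounding suffixes but no _sat variants: in OpenCL C, saturation applies only to integer destination types, while values that exceed the destination's range when converting to a floating type follow ordinary IEEE-754 rounding on overflow. For example (value chosen for illustration):

    // 1e300 exceeds the float range; under round-toward-zero the result
    // is the largest finite float rather than infinity. No _sat form exists.
    float f = convert_float_rtz(1e300);
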
-double __ovld __cnfn convert_double(char);
-double __ovld __cnfn convert_double(double);
-double __ovld __cnfn convert_double(float);
-double __ovld __cnfn convert_double(int);
-double __ovld __cnfn convert_double(long);
-double __ovld __cnfn convert_double(short);
-double __ovld __cnfn convert_double(uchar);
-double __ovld __cnfn convert_double(uint);
-double __ovld __cnfn convert_double(ulong);
-double __ovld __cnfn convert_double(ushort);
-double __ovld __cnfn convert_double_rte(char);
-double __ovld __cnfn convert_double_rte(double);
-double __ovld __cnfn convert_double_rte(float);
-double __ovld __cnfn convert_double_rte(int);
-double __ovld __cnfn convert_double_rte(long);
-double __ovld __cnfn convert_double_rte(short);
-double __ovld __cnfn convert_double_rte(uchar);
-double __ovld __cnfn convert_double_rte(uint);
-double __ovld __cnfn convert_double_rte(ulong);
-double __ovld __cnfn convert_double_rte(ushort);
-double __ovld __cnfn convert_double_rtn(char);
-double __ovld __cnfn convert_double_rtn(double);
-double __ovld __cnfn convert_double_rtn(float);
-double __ovld __cnfn convert_double_rtn(int);
-double __ovld __cnfn convert_double_rtn(long);
-double __ovld __cnfn convert_double_rtn(short);
-double __ovld __cnfn convert_double_rtn(uchar);
-double __ovld __cnfn convert_double_rtn(uint);
-double __ovld __cnfn convert_double_rtn(ulong);
-double __ovld __cnfn convert_double_rtn(ushort);
-double __ovld __cnfn convert_double_rtp(char);
-double __ovld __cnfn convert_double_rtp(double);
-double __ovld __cnfn convert_double_rtp(float);
-double __ovld __cnfn convert_double_rtp(int);
-double __ovld __cnfn convert_double_rtp(long);
-double __ovld __cnfn convert_double_rtp(short);
-double __ovld __cnfn convert_double_rtp(uchar);
-double __ovld __cnfn convert_double_rtp(uint);
-double __ovld __cnfn convert_double_rtp(ulong);
-double __ovld __cnfn convert_double_rtp(ushort);
-double __ovld __cnfn convert_double_rtz(char);
-double __ovld __cnfn convert_double_rtz(double);
-double __ovld __cnfn convert_double_rtz(float);
-double __ovld __cnfn convert_double_rtz(int);
-double __ovld __cnfn convert_double_rtz(long);
-double __ovld __cnfn convert_double_rtz(short);
-double __ovld __cnfn convert_double_rtz(uchar);
-double __ovld __cnfn convert_double_rtz(uint);
-double __ovld __cnfn convert_double_rtz(ulong);
-double __ovld __cnfn convert_double_rtz(ushort);
-double2 __ovld __cnfn convert_double2(char2);
-double2 __ovld __cnfn convert_double2(double2);
-double2 __ovld __cnfn convert_double2(float2);
-double2 __ovld __cnfn convert_double2(int2);
-double2 __ovld __cnfn convert_double2(long2);
-double2 __ovld __cnfn convert_double2(short2);
-double2 __ovld __cnfn convert_double2(uchar2);
-double2 __ovld __cnfn convert_double2(uint2);
-double2 __ovld __cnfn convert_double2(ulong2);
-double2 __ovld __cnfn convert_double2(ushort2);
-double2 __ovld __cnfn convert_double2_rte(char2);
-double2 __ovld __cnfn convert_double2_rte(double2);
-double2 __ovld __cnfn convert_double2_rte(float2);
-double2 __ovld __cnfn convert_double2_rte(int2);
-double2 __ovld __cnfn convert_double2_rte(long2);
-double2 __ovld __cnfn convert_double2_rte(short2);
-double2 __ovld __cnfn convert_double2_rte(uchar2);
-double2 __ovld __cnfn convert_double2_rte(uint2);
-double2 __ovld __cnfn convert_double2_rte(ulong2);
-double2 __ovld __cnfn convert_double2_rte(ushort2);
-double2 __ovld __cnfn convert_double2_rtn(char2);
-double2 __ovld __cnfn convert_double2_rtn(double2);
-double2 __ovld __cnfn convert_double2_rtn(float2);
-double2 __ovld __cnfn convert_double2_rtn(int2);
-double2 __ovld __cnfn convert_double2_rtn(long2);
-double2 __ovld __cnfn convert_double2_rtn(short2);
-double2 __ovld __cnfn convert_double2_rtn(uchar2);
-double2 __ovld __cnfn convert_double2_rtn(uint2);
-double2 __ovld __cnfn convert_double2_rtn(ulong2);
-double2 __ovld __cnfn convert_double2_rtn(ushort2);
-double2 __ovld __cnfn convert_double2_rtp(char2);
-double2 __ovld __cnfn convert_double2_rtp(double2);
-double2 __ovld __cnfn convert_double2_rtp(float2);
-double2 __ovld __cnfn convert_double2_rtp(int2);
-double2 __ovld __cnfn convert_double2_rtp(long2);
-double2 __ovld __cnfn convert_double2_rtp(short2);
-double2 __ovld __cnfn convert_double2_rtp(uchar2);
-double2 __ovld __cnfn convert_double2_rtp(uint2);
-double2 __ovld __cnfn convert_double2_rtp(ulong2);
-double2 __ovld __cnfn convert_double2_rtp(ushort2);
-double2 __ovld __cnfn convert_double2_rtz(char2);
-double2 __ovld __cnfn convert_double2_rtz(double2);
-double2 __ovld __cnfn convert_double2_rtz(float2);
-double2 __ovld __cnfn convert_double2_rtz(int2);
-double2 __ovld __cnfn convert_double2_rtz(long2);
-double2 __ovld __cnfn convert_double2_rtz(short2);
-double2 __ovld __cnfn convert_double2_rtz(uchar2);
-double2 __ovld __cnfn convert_double2_rtz(uint2);
-double2 __ovld __cnfn convert_double2_rtz(ulong2);
-double2 __ovld __cnfn convert_double2_rtz(ushort2);
-double3 __ovld __cnfn convert_double3(char3);
-double3 __ovld __cnfn convert_double3(double3);
-double3 __ovld __cnfn convert_double3(float3);
-double3 __ovld __cnfn convert_double3(int3);
-double3 __ovld __cnfn convert_double3(long3);
-double3 __ovld __cnfn convert_double3(short3);
-double3 __ovld __cnfn convert_double3(uchar3);
-double3 __ovld __cnfn convert_double3(uint3);
-double3 __ovld __cnfn convert_double3(ulong3);
-double3 __ovld __cnfn convert_double3(ushort3);
-double3 __ovld __cnfn convert_double3_rte(char3);
-double3 __ovld __cnfn convert_double3_rte(double3);
-double3 __ovld __cnfn convert_double3_rte(float3);
-double3 __ovld __cnfn convert_double3_rte(int3);
-double3 __ovld __cnfn convert_double3_rte(long3);
-double3 __ovld __cnfn convert_double3_rte(short3);
-double3 __ovld __cnfn convert_double3_rte(uchar3);
-double3 __ovld __cnfn convert_double3_rte(uint3);
-double3 __ovld __cnfn convert_double3_rte(ulong3);
-double3 __ovld __cnfn convert_double3_rte(ushort3);
-double3 __ovld __cnfn convert_double3_rtn(char3);
-double3 __ovld __cnfn convert_double3_rtn(double3);
-double3 __ovld __cnfn convert_double3_rtn(float3);
-double3 __ovld __cnfn convert_double3_rtn(int3);
-double3 __ovld __cnfn convert_double3_rtn(long3);
-double3 __ovld __cnfn convert_double3_rtn(short3);
-double3 __ovld __cnfn convert_double3_rtn(uchar3);
-double3 __ovld __cnfn convert_double3_rtn(uint3);
-double3 __ovld __cnfn convert_double3_rtn(ulong3);
-double3 __ovld __cnfn convert_double3_rtn(ushort3);
-double3 __ovld __cnfn convert_double3_rtp(char3);
-double3 __ovld __cnfn convert_double3_rtp(double3);
-double3 __ovld __cnfn convert_double3_rtp(float3);
-double3 __ovld __cnfn convert_double3_rtp(int3);
-double3 __ovld __cnfn convert_double3_rtp(long3);
-double3 __ovld __cnfn convert_double3_rtp(short3);
-double3 __ovld __cnfn convert_double3_rtp(uchar3);
-double3 __ovld __cnfn convert_double3_rtp(uint3);
-double3 __ovld __cnfn convert_double3_rtp(ulong3);
-double3 __ovld __cnfn convert_double3_rtp(ushort3);
-double3 __ovld __cnfn convert_double3_rtz(char3);
-double3 __ovld __cnfn convert_double3_rtz(double3);
-double3 __ovld __cnfn convert_double3_rtz(float3);
-double3 __ovld __cnfn convert_double3_rtz(int3);
-double3 __ovld __cnfn convert_double3_rtz(long3);
-double3 __ovld __cnfn convert_double3_rtz(short3);
-double3 __ovld __cnfn convert_double3_rtz(uchar3);
-double3 __ovld __cnfn convert_double3_rtz(uint3);
-double3 __ovld __cnfn convert_double3_rtz(ulong3);
-double3 __ovld __cnfn convert_double3_rtz(ushort3);
-double4 __ovld __cnfn convert_double4(char4);
-double4 __ovld __cnfn convert_double4(double4);
-double4 __ovld __cnfn convert_double4(float4);
-double4 __ovld __cnfn convert_double4(int4);
-double4 __ovld __cnfn convert_double4(long4);
-double4 __ovld __cnfn convert_double4(short4);
-double4 __ovld __cnfn convert_double4(uchar4);
-double4 __ovld __cnfn convert_double4(uint4);
-double4 __ovld __cnfn convert_double4(ulong4);
-double4 __ovld __cnfn convert_double4(ushort4);
-double4 __ovld __cnfn convert_double4_rte(char4);
-double4 __ovld __cnfn convert_double4_rte(double4);
-double4 __ovld __cnfn convert_double4_rte(float4);
-double4 __ovld __cnfn convert_double4_rte(int4);
-double4 __ovld __cnfn convert_double4_rte(long4);
-double4 __ovld __cnfn convert_double4_rte(short4);
-double4 __ovld __cnfn convert_double4_rte(uchar4);
-double4 __ovld __cnfn convert_double4_rte(uint4);
-double4 __ovld __cnfn convert_double4_rte(ulong4);
-double4 __ovld __cnfn convert_double4_rte(ushort4);
-double4 __ovld __cnfn convert_double4_rtn(char4);
-double4 __ovld __cnfn convert_double4_rtn(double4);
-double4 __ovld __cnfn convert_double4_rtn(float4);
-double4 __ovld __cnfn convert_double4_rtn(int4);
-double4 __ovld __cnfn convert_double4_rtn(long4);
-double4 __ovld __cnfn convert_double4_rtn(short4);
-double4 __ovld __cnfn convert_double4_rtn(uchar4);
-double4 __ovld __cnfn convert_double4_rtn(uint4);
-double4 __ovld __cnfn convert_double4_rtn(ulong4);
-double4 __ovld __cnfn convert_double4_rtn(ushort4);
-double4 __ovld __cnfn convert_double4_rtp(char4);
-double4 __ovld __cnfn convert_double4_rtp(double4);
-double4 __ovld __cnfn convert_double4_rtp(float4);
-double4 __ovld __cnfn convert_double4_rtp(int4);
-double4 __ovld __cnfn convert_double4_rtp(long4);
-double4 __ovld __cnfn convert_double4_rtp(short4);
-double4 __ovld __cnfn convert_double4_rtp(uchar4);
-double4 __ovld __cnfn convert_double4_rtp(uint4);
-double4 __ovld __cnfn convert_double4_rtp(ulong4);
-double4 __ovld __cnfn convert_double4_rtp(ushort4);
-double4 __ovld __cnfn convert_double4_rtz(char4);
-double4 __ovld __cnfn convert_double4_rtz(double4);
-double4 __ovld __cnfn convert_double4_rtz(float4);
-double4 __ovld __cnfn convert_double4_rtz(int4);
-double4 __ovld __cnfn convert_double4_rtz(long4);
-double4 __ovld __cnfn convert_double4_rtz(short4);
-double4 __ovld __cnfn convert_double4_rtz(uchar4);
-double4 __ovld __cnfn convert_double4_rtz(uint4);
-double4 __ovld __cnfn convert_double4_rtz(ulong4);
-double4 __ovld __cnfn convert_double4_rtz(ushort4);
-double8 __ovld __cnfn convert_double8(char8);
-double8 __ovld __cnfn convert_double8(double8);
-double8 __ovld __cnfn convert_double8(float8);
-double8 __ovld __cnfn convert_double8(int8);
-double8 __ovld __cnfn convert_double8(long8);
-double8 __ovld __cnfn convert_double8(short8);
-double8 __ovld __cnfn convert_double8(uchar8);
-double8 __ovld __cnfn convert_double8(uint8);
-double8 __ovld __cnfn convert_double8(ulong8);
-double8 __ovld __cnfn convert_double8(ushort8);
-double8 __ovld __cnfn convert_double8_rte(char8);
-double8 __ovld __cnfn convert_double8_rte(double8);
-double8 __ovld __cnfn convert_double8_rte(float8);
-double8 __ovld __cnfn convert_double8_rte(int8);
-double8 __ovld __cnfn convert_double8_rte(long8);
-double8 __ovld __cnfn convert_double8_rte(short8);
-double8 __ovld __cnfn convert_double8_rte(uchar8);
-double8 __ovld __cnfn convert_double8_rte(uint8);
-double8 __ovld __cnfn convert_double8_rte(ulong8);
-double8 __ovld __cnfn convert_double8_rte(ushort8);
-double8 __ovld __cnfn convert_double8_rtn(char8);
-double8 __ovld __cnfn convert_double8_rtn(double8);
-double8 __ovld __cnfn convert_double8_rtn(float8);
-double8 __ovld __cnfn convert_double8_rtn(int8);
-double8 __ovld __cnfn convert_double8_rtn(long8);
-double8 __ovld __cnfn convert_double8_rtn(short8);
-double8 __ovld __cnfn convert_double8_rtn(uchar8);
-double8 __ovld __cnfn convert_double8_rtn(uint8);
-double8 __ovld __cnfn convert_double8_rtn(ulong8);
-double8 __ovld __cnfn convert_double8_rtn(ushort8);
-double8 __ovld __cnfn convert_double8_rtp(char8);
-double8 __ovld __cnfn convert_double8_rtp(double8);
-double8 __ovld __cnfn convert_double8_rtp(float8);
-double8 __ovld __cnfn convert_double8_rtp(int8);
-double8 __ovld __cnfn convert_double8_rtp(long8);
-double8 __ovld __cnfn convert_double8_rtp(short8);
-double8 __ovld __cnfn convert_double8_rtp(uchar8);
-double8 __ovld __cnfn convert_double8_rtp(uint8);
-double8 __ovld __cnfn convert_double8_rtp(ulong8);
-double8 __ovld __cnfn convert_double8_rtp(ushort8);
-double8 __ovld __cnfn convert_double8_rtz(char8);
-double8 __ovld __cnfn convert_double8_rtz(double8);
-double8 __ovld __cnfn convert_double8_rtz(float8);
-double8 __ovld __cnfn convert_double8_rtz(int8);
-double8 __ovld __cnfn convert_double8_rtz(long8);
-double8 __ovld __cnfn convert_double8_rtz(short8);
-double8 __ovld __cnfn convert_double8_rtz(uchar8);
-double8 __ovld __cnfn convert_double8_rtz(uint8);
-double8 __ovld __cnfn convert_double8_rtz(ulong8);
-double8 __ovld __cnfn convert_double8_rtz(ushort8);
-double16 __ovld __cnfn convert_double16(char16);
-double16 __ovld __cnfn convert_double16(double16);
-double16 __ovld __cnfn convert_double16(float16);
-double16 __ovld __cnfn convert_double16(int16);
-double16 __ovld __cnfn convert_double16(long16);
-double16 __ovld __cnfn convert_double16(short16);
-double16 __ovld __cnfn convert_double16(uchar16);
-double16 __ovld __cnfn convert_double16(uint16);
-double16 __ovld __cnfn convert_double16(ulong16);
-double16 __ovld __cnfn convert_double16(ushort16);
-double16 __ovld __cnfn convert_double16_rte(char16);
-double16 __ovld __cnfn convert_double16_rte(double16);
-double16 __ovld __cnfn convert_double16_rte(float16);
-double16 __ovld __cnfn convert_double16_rte(int16);
-double16 __ovld __cnfn convert_double16_rte(long16);
-double16 __ovld __cnfn convert_double16_rte(short16);
-double16 __ovld __cnfn convert_double16_rte(uchar16);
-double16 __ovld __cnfn convert_double16_rte(uint16);
-double16 __ovld __cnfn convert_double16_rte(ulong16);
-double16 __ovld __cnfn convert_double16_rte(ushort16);
-double16 __ovld __cnfn convert_double16_rtn(char16);
-double16 __ovld __cnfn convert_double16_rtn(double16);
-double16 __ovld __cnfn convert_double16_rtn(float16);
-double16 __ovld __cnfn convert_double16_rtn(int16);
-double16 __ovld __cnfn convert_double16_rtn(long16);
-double16 __ovld __cnfn convert_double16_rtn(short16);
-double16 __ovld __cnfn convert_double16_rtn(uchar16);
-double16 __ovld __cnfn convert_double16_rtn(uint16);
-double16 __ovld __cnfn convert_double16_rtn(ulong16);
-double16 __ovld __cnfn convert_double16_rtn(ushort16);
-double16 __ovld __cnfn convert_double16_rtp(char16);
-double16 __ovld __cnfn convert_double16_rtp(double16);
-double16 __ovld __cnfn convert_double16_rtp(float16);
-double16 __ovld __cnfn convert_double16_rtp(int16);
-double16 __ovld __cnfn convert_double16_rtp(long16);
-double16 __ovld __cnfn convert_double16_rtp(short16);
-double16 __ovld __cnfn convert_double16_rtp(uchar16);
-double16 __ovld __cnfn convert_double16_rtp(uint16);
-double16 __ovld __cnfn convert_double16_rtp(ulong16);
-double16 __ovld __cnfn convert_double16_rtp(ushort16);
-double16 __ovld __cnfn convert_double16_rtz(char16);
-double16 __ovld __cnfn convert_double16_rtz(double16);
-double16 __ovld __cnfn convert_double16_rtz(float16);
-double16 __ovld __cnfn convert_double16_rtz(int16);
-double16 __ovld __cnfn convert_double16_rtz(long16);
-double16 __ovld __cnfn convert_double16_rtz(short16);
-double16 __ovld __cnfn convert_double16_rtz(uchar16);
-double16 __ovld __cnfn convert_double16_rtz(uint16);
-double16 __ovld __cnfn convert_double16_rtz(ulong16);
-double16 __ovld __cnfn convert_double16_rtz(ushort16);
-#endif //cl_khr_fp64
-
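All of the double conversions above sit behind the cl_khr_fp64 guard, so portable kernel code would condition its use of them on the same macro. A sketch under that assumption (kernel and buffer names are illustrative):

    #ifdef cl_khr_fp64
    __kernel void widen(__global const int2 *in, __global double2 *out) {
        size_t i = get_global_id(0);
        // convert_double2 is only declared when the device supports fp64.
        out[i] = convert_double2(in[i]);
    }
    #endif
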
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-// Convert half types to non-double types.
-uchar __ovld __cnfn convert_uchar(half);
-uchar __ovld __cnfn convert_uchar_rte(half);
-uchar __ovld __cnfn convert_uchar_rtp(half);
-uchar __ovld __cnfn convert_uchar_rtn(half);
-uchar __ovld __cnfn convert_uchar_rtz(half);
-uchar __ovld __cnfn convert_uchar_sat(half);
-uchar __ovld __cnfn convert_uchar_sat_rte(half);
-uchar __ovld __cnfn convert_uchar_sat_rtp(half);
-uchar __ovld __cnfn convert_uchar_sat_rtn(half);
-uchar __ovld __cnfn convert_uchar_sat_rtz(half);
-uchar2 __ovld __cnfn convert_uchar2(half2);
-uchar2 __ovld __cnfn convert_uchar2_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
-uchar3 __ovld __cnfn convert_uchar3(half3);
-uchar3 __ovld __cnfn convert_uchar3_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
-uchar4 __ovld __cnfn convert_uchar4(half4);
-uchar4 __ovld __cnfn convert_uchar4_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
-uchar8 __ovld __cnfn convert_uchar8(half8);
-uchar8 __ovld __cnfn convert_uchar8_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
-uchar16 __ovld __cnfn convert_uchar16(half16);
-uchar16 __ovld __cnfn convert_uchar16_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
-ushort __ovld __cnfn convert_ushort(half);
-ushort __ovld __cnfn convert_ushort_rte(half);
-ushort __ovld __cnfn convert_ushort_rtp(half);
-ushort __ovld __cnfn convert_ushort_rtn(half);
-ushort __ovld __cnfn convert_ushort_rtz(half);
-ushort __ovld __cnfn convert_ushort_sat(half);
-ushort __ovld __cnfn convert_ushort_sat_rte(half);
-ushort __ovld __cnfn convert_ushort_sat_rtp(half);
-ushort __ovld __cnfn convert_ushort_sat_rtn(half);
-ushort __ovld __cnfn convert_ushort_sat_rtz(half);
-ushort2 __ovld __cnfn convert_ushort2(half2);
-ushort2 __ovld __cnfn convert_ushort2_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
-ushort3 __ovld __cnfn convert_ushort3(half3);
-ushort3 __ovld __cnfn convert_ushort3_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
-ushort4 __ovld __cnfn convert_ushort4(half4);
-ushort4 __ovld __cnfn convert_ushort4_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
-ushort8 __ovld __cnfn convert_ushort8(half8);
-ushort8 __ovld __cnfn convert_ushort8_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
-ushort16 __ovld __cnfn convert_ushort16(half16);
-ushort16 __ovld __cnfn convert_ushort16_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
-uint __ovld __cnfn convert_uint(half);
-uint __ovld __cnfn convert_uint_rte(half);
-uint __ovld __cnfn convert_uint_rtp(half);
-uint __ovld __cnfn convert_uint_rtn(half);
-uint __ovld __cnfn convert_uint_rtz(half);
-uint __ovld __cnfn convert_uint_sat(half);
-uint __ovld __cnfn convert_uint_sat_rte(half);
-uint __ovld __cnfn convert_uint_sat_rtp(half);
-uint __ovld __cnfn convert_uint_sat_rtn(half);
-uint __ovld __cnfn convert_uint_sat_rtz(half);
-uint2 __ovld __cnfn convert_uint2(half2);
-uint2 __ovld __cnfn convert_uint2_rte(half2);
-uint2 __ovld __cnfn convert_uint2_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_rtz(half2);
-uint2 __ovld __cnfn convert_uint2_sat(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
-uint3 __ovld __cnfn convert_uint3(half3);
-uint3 __ovld __cnfn convert_uint3_rte(half3);
-uint3 __ovld __cnfn convert_uint3_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_rtz(half3);
-uint3 __ovld __cnfn convert_uint3_sat(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
-uint4 __ovld __cnfn convert_uint4(half4);
-uint4 __ovld __cnfn convert_uint4_rte(half4);
-uint4 __ovld __cnfn convert_uint4_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_rtz(half4);
-uint4 __ovld __cnfn convert_uint4_sat(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
-uint8 __ovld __cnfn convert_uint8(half8);
-uint8 __ovld __cnfn convert_uint8_rte(half8);
-uint8 __ovld __cnfn convert_uint8_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_rtz(half8);
-uint8 __ovld __cnfn convert_uint8_sat(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
-uint16 __ovld __cnfn convert_uint16(half16);
-uint16 __ovld __cnfn convert_uint16_rte(half16);
-uint16 __ovld __cnfn convert_uint16_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_rtz(half16);
-uint16 __ovld __cnfn convert_uint16_sat(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
-ulong __ovld __cnfn convert_ulong(half);
-ulong __ovld __cnfn convert_ulong_rte(half);
-ulong __ovld __cnfn convert_ulong_rtp(half);
-ulong __ovld __cnfn convert_ulong_rtn(half);
-ulong __ovld __cnfn convert_ulong_rtz(half);
-ulong __ovld __cnfn convert_ulong_sat(half);
-ulong __ovld __cnfn convert_ulong_sat_rte(half);
-ulong __ovld __cnfn convert_ulong_sat_rtp(half);
-ulong __ovld __cnfn convert_ulong_sat_rtn(half);
-ulong __ovld __cnfn convert_ulong_sat_rtz(half);
-ulong2 __ovld __cnfn convert_ulong2(half2);
-ulong2 __ovld __cnfn convert_ulong2_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
-ulong3 __ovld __cnfn convert_ulong3(half3);
-ulong3 __ovld __cnfn convert_ulong3_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
-ulong4 __ovld __cnfn convert_ulong4(half4);
-ulong4 __ovld __cnfn convert_ulong4_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
-ulong8 __ovld __cnfn convert_ulong8(half8);
-ulong8 __ovld __cnfn convert_ulong8_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
-ulong16 __ovld __cnfn convert_ulong16(half16);
-ulong16 __ovld __cnfn convert_ulong16_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
-char __ovld __cnfn convert_char(half);
-char __ovld __cnfn convert_char_rte(half);
-char __ovld __cnfn convert_char_rtp(half);
-char __ovld __cnfn convert_char_rtn(half);
-char __ovld __cnfn convert_char_rtz(half);
-char __ovld __cnfn convert_char_sat(half);
-char __ovld __cnfn convert_char_sat_rte(half);
-char __ovld __cnfn convert_char_sat_rtp(half);
-char __ovld __cnfn convert_char_sat_rtn(half);
-char __ovld __cnfn convert_char_sat_rtz(half);
-char2 __ovld __cnfn convert_char2(half2);
-char2 __ovld __cnfn convert_char2_rte(half2);
-char2 __ovld __cnfn convert_char2_rtp(half2);
-char2 __ovld __cnfn convert_char2_rtn(half2);
-char2 __ovld __cnfn convert_char2_rtz(half2);
-char2 __ovld __cnfn convert_char2_sat(half2);
-char2 __ovld __cnfn convert_char2_sat_rte(half2);
-char2 __ovld __cnfn convert_char2_sat_rtp(half2);
-char2 __ovld __cnfn convert_char2_sat_rtn(half2);
-char2 __ovld __cnfn convert_char2_sat_rtz(half2);
-char3 __ovld __cnfn convert_char3(half3);
-char3 __ovld __cnfn convert_char3_rte(half3);
-char3 __ovld __cnfn convert_char3_rtp(half3);
-char3 __ovld __cnfn convert_char3_rtn(half3);
-char3 __ovld __cnfn convert_char3_rtz(half3);
-char3 __ovld __cnfn convert_char3_sat(half3);
-char3 __ovld __cnfn convert_char3_sat_rte(half3);
-char3 __ovld __cnfn convert_char3_sat_rtp(half3);
-char3 __ovld __cnfn convert_char3_sat_rtn(half3);
-char3 __ovld __cnfn convert_char3_sat_rtz(half3);
-char4 __ovld __cnfn convert_char4(half4);
-char4 __ovld __cnfn convert_char4_rte(half4);
-char4 __ovld __cnfn convert_char4_rtp(half4);
-char4 __ovld __cnfn convert_char4_rtn(half4);
-char4 __ovld __cnfn convert_char4_rtz(half4);
-char4 __ovld __cnfn convert_char4_sat(half4);
-char4 __ovld __cnfn convert_char4_sat_rte(half4);
-char4 __ovld __cnfn convert_char4_sat_rtp(half4);
-char4 __ovld __cnfn convert_char4_sat_rtn(half4);
-char4 __ovld __cnfn convert_char4_sat_rtz(half4);
-char8 __ovld __cnfn convert_char8(half8);
-char8 __ovld __cnfn convert_char8_rte(half8);
-char8 __ovld __cnfn convert_char8_rtp(half8);
-char8 __ovld __cnfn convert_char8_rtn(half8);
-char8 __ovld __cnfn convert_char8_rtz(half8);
-char8 __ovld __cnfn convert_char8_sat(half8);
-char8 __ovld __cnfn convert_char8_sat_rte(half8);
-char8 __ovld __cnfn convert_char8_sat_rtp(half8);
-char8 __ovld __cnfn convert_char8_sat_rtn(half8);
-char8 __ovld __cnfn convert_char8_sat_rtz(half8);
-char16 __ovld __cnfn convert_char16(half16);
-char16 __ovld __cnfn convert_char16_rte(half16);
-char16 __ovld __cnfn convert_char16_rtp(half16);
-char16 __ovld __cnfn convert_char16_rtn(half16);
-char16 __ovld __cnfn convert_char16_rtz(half16);
-char16 __ovld __cnfn convert_char16_sat(half16);
-char16 __ovld __cnfn convert_char16_sat_rte(half16);
-char16 __ovld __cnfn convert_char16_sat_rtp(half16);
-char16 __ovld __cnfn convert_char16_sat_rtn(half16);
-char16 __ovld __cnfn convert_char16_sat_rtz(half16);
-short __ovld __cnfn convert_short(half);
-short __ovld __cnfn convert_short_rte(half);
-short __ovld __cnfn convert_short_rtp(half);
-short __ovld __cnfn convert_short_rtn(half);
-short __ovld __cnfn convert_short_rtz(half);
-short __ovld __cnfn convert_short_sat(half);
-short __ovld __cnfn convert_short_sat_rte(half);
-short __ovld __cnfn convert_short_sat_rtp(half);
-short __ovld __cnfn convert_short_sat_rtn(half);
-short __ovld __cnfn convert_short_sat_rtz(half);
-short2 __ovld __cnfn convert_short2(half2);
-short2 __ovld __cnfn convert_short2_rte(half2);
-short2 __ovld __cnfn convert_short2_rtp(half2);
-short2 __ovld __cnfn convert_short2_rtn(half2);
-short2 __ovld __cnfn convert_short2_rtz(half2);
-short2 __ovld __cnfn convert_short2_sat(half2);
-short2 __ovld __cnfn convert_short2_sat_rte(half2);
-short2 __ovld __cnfn convert_short2_sat_rtp(half2);
-short2 __ovld __cnfn convert_short2_sat_rtn(half2);
-short2 __ovld __cnfn convert_short2_sat_rtz(half2);
-short3 __ovld __cnfn convert_short3(half3);
-short3 __ovld __cnfn convert_short3_rte(half3);
-short3 __ovld __cnfn convert_short3_rtp(half3);
-short3 __ovld __cnfn convert_short3_rtn(half3);
-short3 __ovld __cnfn convert_short3_rtz(half3);
-short3 __ovld __cnfn convert_short3_sat(half3);
-short3 __ovld __cnfn convert_short3_sat_rte(half3);
-short3 __ovld __cnfn convert_short3_sat_rtp(half3);
-short3 __ovld __cnfn convert_short3_sat_rtn(half3);
-short3 __ovld __cnfn convert_short3_sat_rtz(half3);
-short4 __ovld __cnfn convert_short4(half4);
-short4 __ovld __cnfn convert_short4_rte(half4);
-short4 __ovld __cnfn convert_short4_rtp(half4);
-short4 __ovld __cnfn convert_short4_rtn(half4);
-short4 __ovld __cnfn convert_short4_rtz(half4);
-short4 __ovld __cnfn convert_short4_sat(half4);
-short4 __ovld __cnfn convert_short4_sat_rte(half4);
-short4 __ovld __cnfn convert_short4_sat_rtp(half4);
-short4 __ovld __cnfn convert_short4_sat_rtn(half4);
-short4 __ovld __cnfn convert_short4_sat_rtz(half4);
-short8 __ovld __cnfn convert_short8(half8);
-short8 __ovld __cnfn convert_short8_rte(half8);
-short8 __ovld __cnfn convert_short8_rtp(half8);
-short8 __ovld __cnfn convert_short8_rtn(half8);
-short8 __ovld __cnfn convert_short8_rtz(half8);
-short8 __ovld __cnfn convert_short8_sat(half8);
-short8 __ovld __cnfn convert_short8_sat_rte(half8);
-short8 __ovld __cnfn convert_short8_sat_rtp(half8);
-short8 __ovld __cnfn convert_short8_sat_rtn(half8);
-short8 __ovld __cnfn convert_short8_sat_rtz(half8);
-short16 __ovld __cnfn convert_short16(half16);
-short16 __ovld __cnfn convert_short16_rte(half16);
-short16 __ovld __cnfn convert_short16_rtp(half16);
-short16 __ovld __cnfn convert_short16_rtn(half16);
-short16 __ovld __cnfn convert_short16_rtz(half16);
-short16 __ovld __cnfn convert_short16_sat(half16);
-short16 __ovld __cnfn convert_short16_sat_rte(half16);
-short16 __ovld __cnfn convert_short16_sat_rtp(half16);
-short16 __ovld __cnfn convert_short16_sat_rtn(half16);
-short16 __ovld __cnfn convert_short16_sat_rtz(half16);
-int __ovld __cnfn convert_int(half);
-int __ovld __cnfn convert_int_rte(half);
-int __ovld __cnfn convert_int_rtp(half);
-int __ovld __cnfn convert_int_rtn(half);
-int __ovld __cnfn convert_int_rtz(half);
-int __ovld __cnfn convert_int_sat(half);
-int __ovld __cnfn convert_int_sat_rte(half);
-int __ovld __cnfn convert_int_sat_rtp(half);
-int __ovld __cnfn convert_int_sat_rtn(half);
-int __ovld __cnfn convert_int_sat_rtz(half);
-int2 __ovld __cnfn convert_int2(half2);
-int2 __ovld __cnfn convert_int2_rte(half2);
-int2 __ovld __cnfn convert_int2_rtp(half2);
-int2 __ovld __cnfn convert_int2_rtn(half2);
-int2 __ovld __cnfn convert_int2_rtz(half2);
-int2 __ovld __cnfn convert_int2_sat(half2);
-int2 __ovld __cnfn convert_int2_sat_rte(half2);
-int2 __ovld __cnfn convert_int2_sat_rtp(half2);
-int2 __ovld __cnfn convert_int2_sat_rtn(half2);
-int2 __ovld __cnfn convert_int2_sat_rtz(half2);
-int3 __ovld __cnfn convert_int3(half3);
-int3 __ovld __cnfn convert_int3_rte(half3);
-int3 __ovld __cnfn convert_int3_rtp(half3);
-int3 __ovld __cnfn convert_int3_rtn(half3);
-int3 __ovld __cnfn convert_int3_rtz(half3);
-int3 __ovld __cnfn convert_int3_sat(half3);
-int3 __ovld __cnfn convert_int3_sat_rte(half3);
-int3 __ovld __cnfn convert_int3_sat_rtp(half3);
-int3 __ovld __cnfn convert_int3_sat_rtn(half3);
-int3 __ovld __cnfn convert_int3_sat_rtz(half3);
-int4 __ovld __cnfn convert_int4(half4);
-int4 __ovld __cnfn convert_int4_rte(half4);
-int4 __ovld __cnfn convert_int4_rtp(half4);
-int4 __ovld __cnfn convert_int4_rtn(half4);
-int4 __ovld __cnfn convert_int4_rtz(half4);
-int4 __ovld __cnfn convert_int4_sat(half4);
-int4 __ovld __cnfn convert_int4_sat_rte(half4);
-int4 __ovld __cnfn convert_int4_sat_rtp(half4);
-int4 __ovld __cnfn convert_int4_sat_rtn(half4);
-int4 __ovld __cnfn convert_int4_sat_rtz(half4);
-int8 __ovld __cnfn convert_int8(half8);
-int8 __ovld __cnfn convert_int8_rte(half8);
-int8 __ovld __cnfn convert_int8_rtp(half8);
-int8 __ovld __cnfn convert_int8_rtn(half8);
-int8 __ovld __cnfn convert_int8_rtz(half8);
-int8 __ovld __cnfn convert_int8_sat(half8);
-int8 __ovld __cnfn convert_int8_sat_rte(half8);
-int8 __ovld __cnfn convert_int8_sat_rtp(half8);
-int8 __ovld __cnfn convert_int8_sat_rtn(half8);
-int8 __ovld __cnfn convert_int8_sat_rtz(half8);
-int16 __ovld __cnfn convert_int16(half16);
-int16 __ovld __cnfn convert_int16_rte(half16);
-int16 __ovld __cnfn convert_int16_rtp(half16);
-int16 __ovld __cnfn convert_int16_rtn(half16);
-int16 __ovld __cnfn convert_int16_rtz(half16);
-int16 __ovld __cnfn convert_int16_sat(half16);
-int16 __ovld __cnfn convert_int16_sat_rte(half16);
-int16 __ovld __cnfn convert_int16_sat_rtp(half16);
-int16 __ovld __cnfn convert_int16_sat_rtn(half16);
-int16 __ovld __cnfn convert_int16_sat_rtz(half16);
-long __ovld __cnfn convert_long(half);
-long __ovld __cnfn convert_long_rte(half);
-long __ovld __cnfn convert_long_rtp(half);
-long __ovld __cnfn convert_long_rtn(half);
-long __ovld __cnfn convert_long_rtz(half);
-long __ovld __cnfn convert_long_sat(half);
-long __ovld __cnfn convert_long_sat_rte(half);
-long __ovld __cnfn convert_long_sat_rtp(half);
-long __ovld __cnfn convert_long_sat_rtn(half);
-long __ovld __cnfn convert_long_sat_rtz(half);
-long2 __ovld __cnfn convert_long2(half2);
-long2 __ovld __cnfn convert_long2_rte(half2);
-long2 __ovld __cnfn convert_long2_rtp(half2);
-long2 __ovld __cnfn convert_long2_rtn(half2);
-long2 __ovld __cnfn convert_long2_rtz(half2);
-long2 __ovld __cnfn convert_long2_sat(half2);
-long2 __ovld __cnfn convert_long2_sat_rte(half2);
-long2 __ovld __cnfn convert_long2_sat_rtp(half2);
-long2 __ovld __cnfn convert_long2_sat_rtn(half2);
-long2 __ovld __cnfn convert_long2_sat_rtz(half2);
-long3 __ovld __cnfn convert_long3(half3);
-long3 __ovld __cnfn convert_long3_rte(half3);
-long3 __ovld __cnfn convert_long3_rtp(half3);
-long3 __ovld __cnfn convert_long3_rtn(half3);
-long3 __ovld __cnfn convert_long3_rtz(half3);
-long3 __ovld __cnfn convert_long3_sat(half3);
-long3 __ovld __cnfn convert_long3_sat_rte(half3);
-long3 __ovld __cnfn convert_long3_sat_rtp(half3);
-long3 __ovld __cnfn convert_long3_sat_rtn(half3);
-long3 __ovld __cnfn convert_long3_sat_rtz(half3);
-long4 __ovld __cnfn convert_long4(half4);
-long4 __ovld __cnfn convert_long4_rte(half4);
-long4 __ovld __cnfn convert_long4_rtp(half4);
-long4 __ovld __cnfn convert_long4_rtn(half4);
-long4 __ovld __cnfn convert_long4_rtz(half4);
-long4 __ovld __cnfn convert_long4_sat(half4);
-long4 __ovld __cnfn convert_long4_sat_rte(half4);
-long4 __ovld __cnfn convert_long4_sat_rtp(half4);
-long4 __ovld __cnfn convert_long4_sat_rtn(half4);
-long4 __ovld __cnfn convert_long4_sat_rtz(half4);
-long8 __ovld __cnfn convert_long8(half8);
-long8 __ovld __cnfn convert_long8_rte(half8);
-long8 __ovld __cnfn convert_long8_rtp(half8);
-long8 __ovld __cnfn convert_long8_rtn(half8);
-long8 __ovld __cnfn convert_long8_rtz(half8);
-long8 __ovld __cnfn convert_long8_sat(half8);
-long8 __ovld __cnfn convert_long8_sat_rte(half8);
-long8 __ovld __cnfn convert_long8_sat_rtp(half8);
-long8 __ovld __cnfn convert_long8_sat_rtn(half8);
-long8 __ovld __cnfn convert_long8_sat_rtz(half8);
-long16 __ovld __cnfn convert_long16(half16);
-long16 __ovld __cnfn convert_long16_rte(half16);
-long16 __ovld __cnfn convert_long16_rtp(half16);
-long16 __ovld __cnfn convert_long16_rtn(half16);
-long16 __ovld __cnfn convert_long16_rtz(half16);
-long16 __ovld __cnfn convert_long16_sat(half16);
-long16 __ovld __cnfn convert_long16_sat_rte(half16);
-long16 __ovld __cnfn convert_long16_sat_rtp(half16);
-long16 __ovld __cnfn convert_long16_sat_rtn(half16);
-long16 __ovld __cnfn convert_long16_sat_rtz(half16);
-float __ovld __cnfn convert_float(half);
-float __ovld __cnfn convert_float_rte(half);
-float __ovld __cnfn convert_float_rtp(half);
-float __ovld __cnfn convert_float_rtn(half);
-float __ovld __cnfn convert_float_rtz(half);
-float2 __ovld __cnfn convert_float2(half2);
-float2 __ovld __cnfn convert_float2_rte(half2);
-float2 __ovld __cnfn convert_float2_rtp(half2);
-float2 __ovld __cnfn convert_float2_rtn(half2);
-float2 __ovld __cnfn convert_float2_rtz(half2);
-float3 __ovld __cnfn convert_float3(half3);
-float3 __ovld __cnfn convert_float3_rte(half3);
-float3 __ovld __cnfn convert_float3_rtp(half3);
-float3 __ovld __cnfn convert_float3_rtn(half3);
-float3 __ovld __cnfn convert_float3_rtz(half3);
-float4 __ovld __cnfn convert_float4(half4);
-float4 __ovld __cnfn convert_float4_rte(half4);
-float4 __ovld __cnfn convert_float4_rtp(half4);
-float4 __ovld __cnfn convert_float4_rtn(half4);
-float4 __ovld __cnfn convert_float4_rtz(half4);
-float8 __ovld __cnfn convert_float8(half8);
-float8 __ovld __cnfn convert_float8_rte(half8);
-float8 __ovld __cnfn convert_float8_rtp(half8);
-float8 __ovld __cnfn convert_float8_rtn(half8);
-float8 __ovld __cnfn convert_float8_rtz(half8);
-float16 __ovld __cnfn convert_float16(half16);
-float16 __ovld __cnfn convert_float16_rte(half16);
-float16 __ovld __cnfn convert_float16_rtp(half16);
-float16 __ovld __cnfn convert_float16_rtn(half16);
-float16 __ovld __cnfn convert_float16_rtz(half16);
-
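
Each family above pairs a destination type with an optional _sat saturation modifier and an explicit rounding mode (_rte, _rtp, _rtn, _rtz). A minimal sketch of how they compose; the kernel and buffer names here are illustrative only:

  #pragma OPENCL EXTENSION cl_khr_fp16 : enable

  // Quantize half data to char with saturation and round-to-nearest-even.
  __kernel void quantize(__global const half4 *src, __global char4 *dst) {
      size_t i = get_global_id(0);
      dst[i] = convert_char4_sat_rte(src[i]);  // clamps to [-128, 127]
  }
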
-// Convert non-double types to half types.
-half __ovld __cnfn convert_half(uchar);
-half __ovld __cnfn convert_half(ushort);
-half __ovld __cnfn convert_half(uint);
-half __ovld __cnfn convert_half(ulong);
-half __ovld __cnfn convert_half(char);
-half __ovld __cnfn convert_half(short);
-half __ovld __cnfn convert_half(int);
-half __ovld __cnfn convert_half(long);
-half __ovld __cnfn convert_half(float);
-half __ovld __cnfn convert_half(half);
-half __ovld __cnfn convert_half_rte(uchar);
-half __ovld __cnfn convert_half_rte(ushort);
-half __ovld __cnfn convert_half_rte(uint);
-half __ovld __cnfn convert_half_rte(ulong);
-half __ovld __cnfn convert_half_rte(char);
-half __ovld __cnfn convert_half_rte(short);
-half __ovld __cnfn convert_half_rte(int);
-half __ovld __cnfn convert_half_rte(long);
-half __ovld __cnfn convert_half_rte(float);
-half __ovld __cnfn convert_half_rte(half);
-half __ovld __cnfn convert_half_rtp(uchar);
-half __ovld __cnfn convert_half_rtp(ushort);
-half __ovld __cnfn convert_half_rtp(uint);
-half __ovld __cnfn convert_half_rtp(ulong);
-half __ovld __cnfn convert_half_rtp(char);
-half __ovld __cnfn convert_half_rtp(short);
-half __ovld __cnfn convert_half_rtp(int);
-half __ovld __cnfn convert_half_rtp(long);
-half __ovld __cnfn convert_half_rtp(float);
-half __ovld __cnfn convert_half_rtp(half);
-half __ovld __cnfn convert_half_rtn(uchar);
-half __ovld __cnfn convert_half_rtn(ushort);
-half __ovld __cnfn convert_half_rtn(uint);
-half __ovld __cnfn convert_half_rtn(ulong);
-half __ovld __cnfn convert_half_rtn(char);
-half __ovld __cnfn convert_half_rtn(short);
-half __ovld __cnfn convert_half_rtn(int);
-half __ovld __cnfn convert_half_rtn(long);
-half __ovld __cnfn convert_half_rtn(float);
-half __ovld __cnfn convert_half_rtn(half);
-half __ovld __cnfn convert_half_rtz(uchar);
-half __ovld __cnfn convert_half_rtz(ushort);
-half __ovld __cnfn convert_half_rtz(uint);
-half __ovld __cnfn convert_half_rtz(ulong);
-half __ovld __cnfn convert_half_rtz(char);
-half __ovld __cnfn convert_half_rtz(short);
-half __ovld __cnfn convert_half_rtz(int);
-half __ovld __cnfn convert_half_rtz(long);
-half __ovld __cnfn convert_half_rtz(float);
-half __ovld __cnfn convert_half_rtz(half);
-half2 __ovld __cnfn convert_half2(char2);
-half2 __ovld __cnfn convert_half2(uchar2);
-half2 __ovld __cnfn convert_half2(short2);
-half2 __ovld __cnfn convert_half2(ushort2);
-half2 __ovld __cnfn convert_half2(int2);
-half2 __ovld __cnfn convert_half2(uint2);
-half2 __ovld __cnfn convert_half2(long2);
-half2 __ovld __cnfn convert_half2(ulong2);
-half2 __ovld __cnfn convert_half2(float2);
-half2 __ovld __cnfn convert_half2(half2);
-half2 __ovld __cnfn convert_half2_rte(char2);
-half2 __ovld __cnfn convert_half2_rte(uchar2);
-half2 __ovld __cnfn convert_half2_rte(short2);
-half2 __ovld __cnfn convert_half2_rte(ushort2);
-half2 __ovld __cnfn convert_half2_rte(int2);
-half2 __ovld __cnfn convert_half2_rte(uint2);
-half2 __ovld __cnfn convert_half2_rte(long2);
-half2 __ovld __cnfn convert_half2_rte(ulong2);
-half2 __ovld __cnfn convert_half2_rte(float2);
-half2 __ovld __cnfn convert_half2_rte(half2);
-half2 __ovld __cnfn convert_half2_rtp(char2);
-half2 __ovld __cnfn convert_half2_rtp(uchar2);
-half2 __ovld __cnfn convert_half2_rtp(short2);
-half2 __ovld __cnfn convert_half2_rtp(ushort2);
-half2 __ovld __cnfn convert_half2_rtp(int2);
-half2 __ovld __cnfn convert_half2_rtp(uint2);
-half2 __ovld __cnfn convert_half2_rtp(long2);
-half2 __ovld __cnfn convert_half2_rtp(ulong2);
-half2 __ovld __cnfn convert_half2_rtp(float2);
-half2 __ovld __cnfn convert_half2_rtp(half2);
-half2 __ovld __cnfn convert_half2_rtn(char2);
-half2 __ovld __cnfn convert_half2_rtn(uchar2);
-half2 __ovld __cnfn convert_half2_rtn(short2);
-half2 __ovld __cnfn convert_half2_rtn(ushort2);
-half2 __ovld __cnfn convert_half2_rtn(int2);
-half2 __ovld __cnfn convert_half2_rtn(uint2);
-half2 __ovld __cnfn convert_half2_rtn(long2);
-half2 __ovld __cnfn convert_half2_rtn(ulong2);
-half2 __ovld __cnfn convert_half2_rtn(float2);
-half2 __ovld __cnfn convert_half2_rtn(half2);
-half2 __ovld __cnfn convert_half2_rtz(char2);
-half2 __ovld __cnfn convert_half2_rtz(uchar2);
-half2 __ovld __cnfn convert_half2_rtz(short2);
-half2 __ovld __cnfn convert_half2_rtz(ushort2);
-half2 __ovld __cnfn convert_half2_rtz(int2);
-half2 __ovld __cnfn convert_half2_rtz(uint2);
-half2 __ovld __cnfn convert_half2_rtz(long2);
-half2 __ovld __cnfn convert_half2_rtz(ulong2);
-half2 __ovld __cnfn convert_half2_rtz(float2);
-half2 __ovld __cnfn convert_half2_rtz(half2);
-half3 __ovld __cnfn convert_half3(char3);
-half3 __ovld __cnfn convert_half3(uchar3);
-half3 __ovld __cnfn convert_half3(short3);
-half3 __ovld __cnfn convert_half3(ushort3);
-half3 __ovld __cnfn convert_half3(int3);
-half3 __ovld __cnfn convert_half3(uint3);
-half3 __ovld __cnfn convert_half3(long3);
-half3 __ovld __cnfn convert_half3(ulong3);
-half3 __ovld __cnfn convert_half3(float3);
-half3 __ovld __cnfn convert_half3(half3);
-half3 __ovld __cnfn convert_half3_rte(char3);
-half3 __ovld __cnfn convert_half3_rte(uchar3);
-half3 __ovld __cnfn convert_half3_rte(short3);
-half3 __ovld __cnfn convert_half3_rte(ushort3);
-half3 __ovld __cnfn convert_half3_rte(int3);
-half3 __ovld __cnfn convert_half3_rte(uint3);
-half3 __ovld __cnfn convert_half3_rte(long3);
-half3 __ovld __cnfn convert_half3_rte(ulong3);
-half3 __ovld __cnfn convert_half3_rte(float3);
-half3 __ovld __cnfn convert_half3_rte(half3);
-half3 __ovld __cnfn convert_half3_rtp(char3);
-half3 __ovld __cnfn convert_half3_rtp(uchar3);
-half3 __ovld __cnfn convert_half3_rtp(short3);
-half3 __ovld __cnfn convert_half3_rtp(ushort3);
-half3 __ovld __cnfn convert_half3_rtp(int3);
-half3 __ovld __cnfn convert_half3_rtp(uint3);
-half3 __ovld __cnfn convert_half3_rtp(long3);
-half3 __ovld __cnfn convert_half3_rtp(ulong3);
-half3 __ovld __cnfn convert_half3_rtp(float3);
-half3 __ovld __cnfn convert_half3_rtp(half3);
-half3 __ovld __cnfn convert_half3_rtn(char3);
-half3 __ovld __cnfn convert_half3_rtn(uchar3);
-half3 __ovld __cnfn convert_half3_rtn(short3);
-half3 __ovld __cnfn convert_half3_rtn(ushort3);
-half3 __ovld __cnfn convert_half3_rtn(int3);
-half3 __ovld __cnfn convert_half3_rtn(uint3);
-half3 __ovld __cnfn convert_half3_rtn(long3);
-half3 __ovld __cnfn convert_half3_rtn(ulong3);
-half3 __ovld __cnfn convert_half3_rtn(float3);
-half3 __ovld __cnfn convert_half3_rtn(half3);
-half3 __ovld __cnfn convert_half3_rtz(char3);
-half3 __ovld __cnfn convert_half3_rtz(uchar3);
-half3 __ovld __cnfn convert_half3_rtz(short3);
-half3 __ovld __cnfn convert_half3_rtz(ushort3);
-half3 __ovld __cnfn convert_half3_rtz(int3);
-half3 __ovld __cnfn convert_half3_rtz(uint3);
-half3 __ovld __cnfn convert_half3_rtz(long3);
-half3 __ovld __cnfn convert_half3_rtz(ulong3);
-half3 __ovld __cnfn convert_half3_rtz(float3);
-half3 __ovld __cnfn convert_half3_rtz(half3);
-half4 __ovld __cnfn convert_half4(char4);
-half4 __ovld __cnfn convert_half4(uchar4);
-half4 __ovld __cnfn convert_half4(short4);
-half4 __ovld __cnfn convert_half4(ushort4);
-half4 __ovld __cnfn convert_half4(int4);
-half4 __ovld __cnfn convert_half4(uint4);
-half4 __ovld __cnfn convert_half4(long4);
-half4 __ovld __cnfn convert_half4(ulong4);
-half4 __ovld __cnfn convert_half4(float4);
-half4 __ovld __cnfn convert_half4(half4);
-half4 __ovld __cnfn convert_half4_rte(char4);
-half4 __ovld __cnfn convert_half4_rte(uchar4);
-half4 __ovld __cnfn convert_half4_rte(short4);
-half4 __ovld __cnfn convert_half4_rte(ushort4);
-half4 __ovld __cnfn convert_half4_rte(int4);
-half4 __ovld __cnfn convert_half4_rte(uint4);
-half4 __ovld __cnfn convert_half4_rte(long4);
-half4 __ovld __cnfn convert_half4_rte(ulong4);
-half4 __ovld __cnfn convert_half4_rte(float4);
-half4 __ovld __cnfn convert_half4_rte(half4);
-half4 __ovld __cnfn convert_half4_rtp(char4);
-half4 __ovld __cnfn convert_half4_rtp(uchar4);
-half4 __ovld __cnfn convert_half4_rtp(short4);
-half4 __ovld __cnfn convert_half4_rtp(ushort4);
-half4 __ovld __cnfn convert_half4_rtp(int4);
-half4 __ovld __cnfn convert_half4_rtp(uint4);
-half4 __ovld __cnfn convert_half4_rtp(long4);
-half4 __ovld __cnfn convert_half4_rtp(ulong4);
-half4 __ovld __cnfn convert_half4_rtp(float4);
-half4 __ovld __cnfn convert_half4_rtp(half4);
-half4 __ovld __cnfn convert_half4_rtn(char4);
-half4 __ovld __cnfn convert_half4_rtn(uchar4);
-half4 __ovld __cnfn convert_half4_rtn(short4);
-half4 __ovld __cnfn convert_half4_rtn(ushort4);
-half4 __ovld __cnfn convert_half4_rtn(int4);
-half4 __ovld __cnfn convert_half4_rtn(uint4);
-half4 __ovld __cnfn convert_half4_rtn(long4);
-half4 __ovld __cnfn convert_half4_rtn(ulong4);
-half4 __ovld __cnfn convert_half4_rtn(float4);
-half4 __ovld __cnfn convert_half4_rtn(half4);
-half4 __ovld __cnfn convert_half4_rtz(char4);
-half4 __ovld __cnfn convert_half4_rtz(uchar4);
-half4 __ovld __cnfn convert_half4_rtz(short4);
-half4 __ovld __cnfn convert_half4_rtz(ushort4);
-half4 __ovld __cnfn convert_half4_rtz(int4);
-half4 __ovld __cnfn convert_half4_rtz(uint4);
-half4 __ovld __cnfn convert_half4_rtz(long4);
-half4 __ovld __cnfn convert_half4_rtz(ulong4);
-half4 __ovld __cnfn convert_half4_rtz(float4);
-half4 __ovld __cnfn convert_half4_rtz(half4);
-half8 __ovld __cnfn convert_half8(char8);
-half8 __ovld __cnfn convert_half8(uchar8);
-half8 __ovld __cnfn convert_half8(short8);
-half8 __ovld __cnfn convert_half8(ushort8);
-half8 __ovld __cnfn convert_half8(int8);
-half8 __ovld __cnfn convert_half8(uint8);
-half8 __ovld __cnfn convert_half8(long8);
-half8 __ovld __cnfn convert_half8(ulong8);
-half8 __ovld __cnfn convert_half8(float8);
-half8 __ovld __cnfn convert_half8(half8);
-half8 __ovld __cnfn convert_half8_rte(char8);
-half8 __ovld __cnfn convert_half8_rte(uchar8);
-half8 __ovld __cnfn convert_half8_rte(short8);
-half8 __ovld __cnfn convert_half8_rte(ushort8);
-half8 __ovld __cnfn convert_half8_rte(int8);
-half8 __ovld __cnfn convert_half8_rte(uint8);
-half8 __ovld __cnfn convert_half8_rte(long8);
-half8 __ovld __cnfn convert_half8_rte(ulong8);
-half8 __ovld __cnfn convert_half8_rte(float8);
-half8 __ovld __cnfn convert_half8_rte(half8);
-half8 __ovld __cnfn convert_half8_rtp(char8);
-half8 __ovld __cnfn convert_half8_rtp(uchar8);
-half8 __ovld __cnfn convert_half8_rtp(short8);
-half8 __ovld __cnfn convert_half8_rtp(ushort8);
-half8 __ovld __cnfn convert_half8_rtp(int8);
-half8 __ovld __cnfn convert_half8_rtp(uint8);
-half8 __ovld __cnfn convert_half8_rtp(long8);
-half8 __ovld __cnfn convert_half8_rtp(ulong8);
-half8 __ovld __cnfn convert_half8_rtp(float8);
-half8 __ovld __cnfn convert_half8_rtp(half8);
-half8 __ovld __cnfn convert_half8_rtn(char8);
-half8 __ovld __cnfn convert_half8_rtn(uchar8);
-half8 __ovld __cnfn convert_half8_rtn(short8);
-half8 __ovld __cnfn convert_half8_rtn(ushort8);
-half8 __ovld __cnfn convert_half8_rtn(int8);
-half8 __ovld __cnfn convert_half8_rtn(uint8);
-half8 __ovld __cnfn convert_half8_rtn(long8);
-half8 __ovld __cnfn convert_half8_rtn(ulong8);
-half8 __ovld __cnfn convert_half8_rtn(float8);
-half8 __ovld __cnfn convert_half8_rtn(half8);
-half8 __ovld __cnfn convert_half8_rtz(char8);
-half8 __ovld __cnfn convert_half8_rtz(uchar8);
-half8 __ovld __cnfn convert_half8_rtz(short8);
-half8 __ovld __cnfn convert_half8_rtz(ushort8);
-half8 __ovld __cnfn convert_half8_rtz(int8);
-half8 __ovld __cnfn convert_half8_rtz(uint8);
-half8 __ovld __cnfn convert_half8_rtz(long8);
-half8 __ovld __cnfn convert_half8_rtz(ulong8);
-half8 __ovld __cnfn convert_half8_rtz(float8);
-half8 __ovld __cnfn convert_half8_rtz(half8);
-half16 __ovld __cnfn convert_half16(char16);
-half16 __ovld __cnfn convert_half16(uchar16);
-half16 __ovld __cnfn convert_half16(short16);
-half16 __ovld __cnfn convert_half16(ushort16);
-half16 __ovld __cnfn convert_half16(int16);
-half16 __ovld __cnfn convert_half16(uint16);
-half16 __ovld __cnfn convert_half16(long16);
-half16 __ovld __cnfn convert_half16(ulong16);
-half16 __ovld __cnfn convert_half16(float16);
-half16 __ovld __cnfn convert_half16(half16);
-half16 __ovld __cnfn convert_half16_rte(char16);
-half16 __ovld __cnfn convert_half16_rte(uchar16);
-half16 __ovld __cnfn convert_half16_rte(short16);
-half16 __ovld __cnfn convert_half16_rte(ushort16);
-half16 __ovld __cnfn convert_half16_rte(int16);
-half16 __ovld __cnfn convert_half16_rte(uint16);
-half16 __ovld __cnfn convert_half16_rte(long16);
-half16 __ovld __cnfn convert_half16_rte(ulong16);
-half16 __ovld __cnfn convert_half16_rte(float16);
-half16 __ovld __cnfn convert_half16_rte(half16);
-half16 __ovld __cnfn convert_half16_rtp(char16);
-half16 __ovld __cnfn convert_half16_rtp(uchar16);
-half16 __ovld __cnfn convert_half16_rtp(short16);
-half16 __ovld __cnfn convert_half16_rtp(ushort16);
-half16 __ovld __cnfn convert_half16_rtp(int16);
-half16 __ovld __cnfn convert_half16_rtp(uint16);
-half16 __ovld __cnfn convert_half16_rtp(long16);
-half16 __ovld __cnfn convert_half16_rtp(ulong16);
-half16 __ovld __cnfn convert_half16_rtp(float16);
-half16 __ovld __cnfn convert_half16_rtp(half16);
-half16 __ovld __cnfn convert_half16_rtn(char16);
-half16 __ovld __cnfn convert_half16_rtn(uchar16);
-half16 __ovld __cnfn convert_half16_rtn(short16);
-half16 __ovld __cnfn convert_half16_rtn(ushort16);
-half16 __ovld __cnfn convert_half16_rtn(int16);
-half16 __ovld __cnfn convert_half16_rtn(uint16);
-half16 __ovld __cnfn convert_half16_rtn(long16);
-half16 __ovld __cnfn convert_half16_rtn(ulong16);
-half16 __ovld __cnfn convert_half16_rtn(float16);
-half16 __ovld __cnfn convert_half16_rtn(half16);
-half16 __ovld __cnfn convert_half16_rtz(char16);
-half16 __ovld __cnfn convert_half16_rtz(uchar16);
-half16 __ovld __cnfn convert_half16_rtz(short16);
-half16 __ovld __cnfn convert_half16_rtz(ushort16);
-half16 __ovld __cnfn convert_half16_rtz(int16);
-half16 __ovld __cnfn convert_half16_rtz(uint16);
-half16 __ovld __cnfn convert_half16_rtz(long16);
-half16 __ovld __cnfn convert_half16_rtz(ulong16);
-half16 __ovld __cnfn convert_half16_rtz(float16);
-half16 __ovld __cnfn convert_half16_rtz(half16);
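
Every non-double scalar and vector type converts to half at each rounding mode. A small sketch of packing float data into half storage with round-toward-zero; names are illustrative:

  #pragma OPENCL EXTENSION cl_khr_fp16 : enable

  __kernel void pack_fp16(__global const float4 *in, __global half4 *out) {
      size_t i = get_global_id(0);
      out[i] = convert_half4_rtz(in[i]);  // truncate toward zero
  }
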
-
-// Convert half types to double types.
-#ifdef cl_khr_fp64
-double __ovld __cnfn convert_double(half);
-double __ovld __cnfn convert_double_rte(half);
-double __ovld __cnfn convert_double_rtp(half);
-double __ovld __cnfn convert_double_rtn(half);
-double __ovld __cnfn convert_double_rtz(half);
-double2 __ovld __cnfn convert_double2(half2);
-double2 __ovld __cnfn convert_double2_rte(half2);
-double2 __ovld __cnfn convert_double2_rtp(half2);
-double2 __ovld __cnfn convert_double2_rtn(half2);
-double2 __ovld __cnfn convert_double2_rtz(half2);
-double3 __ovld __cnfn convert_double3(half3);
-double3 __ovld __cnfn convert_double3_rte(half3);
-double3 __ovld __cnfn convert_double3_rtp(half3);
-double3 __ovld __cnfn convert_double3_rtn(half3);
-double3 __ovld __cnfn convert_double3_rtz(half3);
-double4 __ovld __cnfn convert_double4(half4);
-double4 __ovld __cnfn convert_double4_rte(half4);
-double4 __ovld __cnfn convert_double4_rtp(half4);
-double4 __ovld __cnfn convert_double4_rtn(half4);
-double4 __ovld __cnfn convert_double4_rtz(half4);
-double8 __ovld __cnfn convert_double8(half8);
-double8 __ovld __cnfn convert_double8_rte(half8);
-double8 __ovld __cnfn convert_double8_rtp(half8);
-double8 __ovld __cnfn convert_double8_rtn(half8);
-double8 __ovld __cnfn convert_double8_rtz(half8);
-double16 __ovld __cnfn convert_double16(half16);
-double16 __ovld __cnfn convert_double16_rte(half16);
-double16 __ovld __cnfn convert_double16_rtp(half16);
-double16 __ovld __cnfn convert_double16_rtn(half16);
-double16 __ovld __cnfn convert_double16_rtz(half16);
-
-// Convert double types to half types.
-half __ovld __cnfn convert_half(double);
-half __ovld __cnfn convert_half_rte(double);
-half __ovld __cnfn convert_half_rtp(double);
-half __ovld __cnfn convert_half_rtn(double);
-half __ovld __cnfn convert_half_rtz(double);
-half2 __ovld __cnfn convert_half2(double2);
-half2 __ovld __cnfn convert_half2_rte(double2);
-half2 __ovld __cnfn convert_half2_rtp(double2);
-half2 __ovld __cnfn convert_half2_rtn(double2);
-half2 __ovld __cnfn convert_half2_rtz(double2);
-half3 __ovld __cnfn convert_half3(double3);
-half3 __ovld __cnfn convert_half3_rte(double3);
-half3 __ovld __cnfn convert_half3_rtp(double3);
-half3 __ovld __cnfn convert_half3_rtn(double3);
-half3 __ovld __cnfn convert_half3_rtz(double3);
-half4 __ovld __cnfn convert_half4(double4);
-half4 __ovld __cnfn convert_half4_rte(double4);
-half4 __ovld __cnfn convert_half4_rtp(double4);
-half4 __ovld __cnfn convert_half4_rtn(double4);
-half4 __ovld __cnfn convert_half4_rtz(double4);
-half8 __ovld __cnfn convert_half8(double8);
-half8 __ovld __cnfn convert_half8_rte(double8);
-half8 __ovld __cnfn convert_half8_rtp(double8);
-half8 __ovld __cnfn convert_half8_rtn(double8);
-half8 __ovld __cnfn convert_half8_rtz(double8);
-half16 __ovld __cnfn convert_half16(double16);
-half16 __ovld __cnfn convert_half16_rte(double16);
-half16 __ovld __cnfn convert_half16_rtp(double16);
-half16 __ovld __cnfn convert_half16_rtn(double16);
-half16 __ovld __cnfn convert_half16_rtz(double16);
-#endif //cl_khr_fp64
-
-#endif // cl_khr_fp16
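
All of the overloads above are declared only when cl_khr_fp16 (and, for the double variants, cl_khr_fp64) is defined, so portable kernels guard their use on the same macros. A sketch of the usual pattern, with a hypothetical type alias:

  #ifdef cl_khr_fp16
  #pragma OPENCL EXTENSION cl_khr_fp16 : enable
  typedef half4 vec_t;   // 16-bit storage when the extension is present
  #else
  typedef float4 vec_t;  // single-precision fallback
  #endif
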
-
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
-
-/**
- * Returns the number of dimensions in use. This is the
- * value given to the work_dim argument specified in
- * clEnqueueNDRangeKernel.
- * For clEnqueueTask, this returns 1.
- */
-uint __ovld __cnfn get_work_dim(void);
-
-/**
- * Returns the number of global work-items specified for
- * dimension identified by dimindx. This value is given by
- * the global_work_size argument to
- * clEnqueueNDRangeKernel. Valid values of dimindx
- * are 0 to get_work_dim() - 1. For other values of
- * dimindx, get_global_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_global_size(uint dimindx);
-
-/**
- * Returns the unique global work-item ID value for
- * dimension identified by dimindx. The global work-item
- * ID specifies the work-item ID based on the number of
- * global work-items specified to execute the kernel. Valid
- * values of dimindx are 0 to get_work_dim() - 1. For
- * other values of dimindx, get_global_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_id(uint dimindx);
-
-/**
- * Returns the number of local work-items specified in
- * dimension identified by dimindx. This value is given by
- * the local_work_size argument to
- * clEnqueueNDRangeKernel if local_work_size is not
- * NULL; otherwise the OpenCL implementation chooses
- * an appropriate local_work_size value which is returned
- * by this function. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_local_size(uint dimindx);
-
-/**
- * Returns the unique local work-item ID i.e. a work-item
- * within a specific work-group for dimension identified by
- * dimindx. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_local_id(uint dimindx);
-
-/**
- * Returns the number of work-groups that will execute a
- * kernel for dimension identified by dimindx.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values of dimindx, get_num_groups() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_num_groups(uint dimindx);
-
-/**
- * get_group_id returns the work-group ID which is a
- * number from 0 .. get_num_groups(dimindx) - 1.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_group_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_group_id(uint dimindx);
-
-/**
- * get_global_offset returns the offset values specified in
- * global_work_offset argument to
- * clEnqueueNDRangeKernel.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_global_offset() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_offset(uint dimindx);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
-size_t __ovld get_global_linear_id(void);
-size_t __ovld get_local_linear_id(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
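
For uniform work-group sizes these IDs satisfy a simple identity, which the hypothetical kernel below evaluates for dimension 0:

  __kernel void show_ids(__global ulong *out) {
      size_t g = get_global_id(0);
      // For uniform work-groups:
      // get_global_id(0) == get_group_id(0) * get_local_size(0)
      //                     + get_local_id(0) + get_global_offset(0)
      out[g] = (ulong)(get_group_id(0) * get_local_size(0)
                       + get_local_id(0) + get_global_offset(0));
  }
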
-
-// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
-
-/**
- * Arc cosine function.
- */
-float __ovld __cnfn acos(float);
-float2 __ovld __cnfn acos(float2);
-float3 __ovld __cnfn acos(float3);
-float4 __ovld __cnfn acos(float4);
-float8 __ovld __cnfn acos(float8);
-float16 __ovld __cnfn acos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acos(double);
-double2 __ovld __cnfn acos(double2);
-double3 __ovld __cnfn acos(double3);
-double4 __ovld __cnfn acos(double4);
-double8 __ovld __cnfn acos(double8);
-double16 __ovld __cnfn acos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acos(half);
-half2 __ovld __cnfn acos(half2);
-half3 __ovld __cnfn acos(half3);
-half4 __ovld __cnfn acos(half4);
-half8 __ovld __cnfn acos(half8);
-half16 __ovld __cnfn acos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic cosine.
- */
-float __ovld __cnfn acosh(float);
-float2 __ovld __cnfn acosh(float2);
-float3 __ovld __cnfn acosh(float3);
-float4 __ovld __cnfn acosh(float4);
-float8 __ovld __cnfn acosh(float8);
-float16 __ovld __cnfn acosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acosh(double);
-double2 __ovld __cnfn acosh(double2);
-double3 __ovld __cnfn acosh(double3);
-double4 __ovld __cnfn acosh(double4);
-double8 __ovld __cnfn acosh(double8);
-double16 __ovld __cnfn acosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acosh(half);
-half2 __ovld __cnfn acosh(half2);
-half3 __ovld __cnfn acosh(half3);
-half4 __ovld __cnfn acosh(half4);
-half8 __ovld __cnfn acosh(half8);
-half16 __ovld __cnfn acosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute acos (x) / PI.
- */
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc sine function.
- */
-float __ovld __cnfn asin(float);
-float2 __ovld __cnfn asin(float2);
-float3 __ovld __cnfn asin(float3);
-float4 __ovld __cnfn asin(float4);
-float8 __ovld __cnfn asin(float8);
-float16 __ovld __cnfn asin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asin(double);
-double2 __ovld __cnfn asin(double2);
-double3 __ovld __cnfn asin(double3);
-double4 __ovld __cnfn asin(double4);
-double8 __ovld __cnfn asin(double8);
-double16 __ovld __cnfn asin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asin(half);
-half2 __ovld __cnfn asin(half2);
-half3 __ovld __cnfn asin(half3);
-half4 __ovld __cnfn asin(half4);
-half8 __ovld __cnfn asin(half8);
-half16 __ovld __cnfn asin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic sine.
- */
-float __ovld __cnfn asinh(float);
-float2 __ovld __cnfn asinh(float2);
-float3 __ovld __cnfn asinh(float3);
-float4 __ovld __cnfn asinh(float4);
-float8 __ovld __cnfn asinh(float8);
-float16 __ovld __cnfn asinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinh(double);
-double2 __ovld __cnfn asinh(double2);
-double3 __ovld __cnfn asinh(double3);
-double4 __ovld __cnfn asinh(double4);
-double8 __ovld __cnfn asinh(double8);
-double16 __ovld __cnfn asinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinh(half);
-half2 __ovld __cnfn asinh(half2);
-half3 __ovld __cnfn asinh(half3);
-half4 __ovld __cnfn asinh(half4);
-half8 __ovld __cnfn asinh(half8);
-half16 __ovld __cnfn asinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute asin (x) / PI.
- */
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent function.
- */
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent of y / x.
- */
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Hyperbolic arc tangent.
- */
-float __ovld __cnfn atanh(float);
-float2 __ovld __cnfn atanh(float2);
-float3 __ovld __cnfn atanh(float3);
-float4 __ovld __cnfn atanh(float4);
-float8 __ovld __cnfn atanh(float8);
-float16 __ovld __cnfn atanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanh(double);
-double2 __ovld __cnfn atanh(double2);
-double3 __ovld __cnfn atanh(double3);
-double4 __ovld __cnfn atanh(double4);
-double8 __ovld __cnfn atanh(double8);
-double16 __ovld __cnfn atanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanh(half);
-half2 __ovld __cnfn atanh(half2);
-half3 __ovld __cnfn atanh(half3);
-half4 __ovld __cnfn atanh(half4);
-half8 __ovld __cnfn atanh(half8);
-half16 __ovld __cnfn atanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan (x) / PI.
- */
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan2 (y, x) / PI.
- */
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
-#endif //cl_khr_fp16
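
The *pi variants fold the division by PI into the builtin with a single rounding, i.e. atan2pi(y, x) behaves as atan2(y, x) / PI. A hypothetical helper that exploits the [-1, 1] result range to produce degrees directly:

  float polar_angle_deg(float y, float x) {
      // atan2pi returns the angle in units of PI, so scaling by 180
      // yields degrees without an explicit M_PI_F divide.
      return atan2pi(y, x) * 180.0f;
  }
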
-
-/**
- * Compute cube-root.
- */
-float __ovld __cnfn cbrt(float);
-float2 __ovld __cnfn cbrt(float2);
-float3 __ovld __cnfn cbrt(float3);
-float4 __ovld __cnfn cbrt(float4);
-float8 __ovld __cnfn cbrt(float8);
-float16 __ovld __cnfn cbrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cbrt(double);
-double2 __ovld __cnfn cbrt(double2);
-double3 __ovld __cnfn cbrt(double3);
-double4 __ovld __cnfn cbrt(double4);
-double8 __ovld __cnfn cbrt(double8);
-double16 __ovld __cnfn cbrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cbrt(half);
-half2 __ovld __cnfn cbrt(half2);
-half3 __ovld __cnfn cbrt(half3);
-half4 __ovld __cnfn cbrt(half4);
-half8 __ovld __cnfn cbrt(half8);
-half16 __ovld __cnfn cbrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to positive
- * infinity rounding mode.
- */
-float __ovld __cnfn ceil(float);
-float2 __ovld __cnfn ceil(float2);
-float3 __ovld __cnfn ceil(float3);
-float4 __ovld __cnfn ceil(float4);
-float8 __ovld __cnfn ceil(float8);
-float16 __ovld __cnfn ceil(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ceil(double);
-double2 __ovld __cnfn ceil(double2);
-double3 __ovld __cnfn ceil(double3);
-double4 __ovld __cnfn ceil(double4);
-double8 __ovld __cnfn ceil(double8);
-double16 __ovld __cnfn ceil(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ceil(half);
-half2 __ovld __cnfn ceil(half2);
-half3 __ovld __cnfn ceil(half3);
-half4 __ovld __cnfn ceil(half4);
-half8 __ovld __cnfn ceil(half8);
-half16 __ovld __cnfn ceil(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x with its sign changed to match the sign of y.
- */
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
-#endif //cl_khr_fp16
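
A short illustration of copysign, with hypothetical names; note that the sign of negative zero counts as negative:

  // copysign(2.0f, -0.0f) == -2.0f
  float signed_mag(float mag, float dir) {
      return copysign(fabs(mag), dir);  // |mag| carrying dir's sign
  }
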
-
-/**
- * Compute cosine.
- */
-float __ovld __cnfn cos(float);
-float2 __ovld __cnfn cos(float2);
-float3 __ovld __cnfn cos(float3);
-float4 __ovld __cnfn cos(float4);
-float8 __ovld __cnfn cos(float8);
-float16 __ovld __cnfn cos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cos(double);
-double2 __ovld __cnfn cos(double2);
-double3 __ovld __cnfn cos(double3);
-double4 __ovld __cnfn cos(double4);
-double8 __ovld __cnfn cos(double8);
-double16 __ovld __cnfn cos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cos(half);
-half2 __ovld __cnfn cos(half2);
-half3 __ovld __cnfn cos(half3);
-half4 __ovld __cnfn cos(half4);
-half8 __ovld __cnfn cos(half8);
-half16 __ovld __cnfn cos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic cosine.
- */
-float __ovld __cnfn cosh(float);
-float2 __ovld __cnfn cosh(float2);
-float3 __ovld __cnfn cosh(float3);
-float4 __ovld __cnfn cosh(float4);
-float8 __ovld __cnfn cosh(float8);
-float16 __ovld __cnfn cosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cosh(double);
-double2 __ovld __cnfn cosh(double2);
-double3 __ovld __cnfn cosh(double3);
-double4 __ovld __cnfn cosh(double4);
-double8 __ovld __cnfn cosh(double8);
-double16 __ovld __cnfn cosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cosh(half);
-half2 __ovld __cnfn cosh(half2);
-half3 __ovld __cnfn cosh(half3);
-half4 __ovld __cnfn cosh(half4);
-half8 __ovld __cnfn cosh(half8);
-half16 __ovld __cnfn cosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cos (PI * x).
- */
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Complementary error function.
- */
-float __ovld __cnfn erfc(float);
-float2 __ovld __cnfn erfc(float2);
-float3 __ovld __cnfn erfc(float3);
-float4 __ovld __cnfn erfc(float4);
-float8 __ovld __cnfn erfc(float8);
-float16 __ovld __cnfn erfc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erfc(double);
-double2 __ovld __cnfn erfc(double2);
-double3 __ovld __cnfn erfc(double3);
-double4 __ovld __cnfn erfc(double4);
-double8 __ovld __cnfn erfc(double8);
-double16 __ovld __cnfn erfc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erfc(half);
-half2 __ovld __cnfn erfc(half2);
-half3 __ovld __cnfn erfc(half3);
-half4 __ovld __cnfn erfc(half4);
-half8 __ovld __cnfn erfc(half8);
-half16 __ovld __cnfn erfc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Error function encountered in integrating the
- * normal distribution.
- */
-float __ovld __cnfn erf(float);
-float2 __ovld __cnfn erf(float2);
-float3 __ovld __cnfn erf(float3);
-float4 __ovld __cnfn erf(float4);
-float8 __ovld __cnfn erf(float8);
-float16 __ovld __cnfn erf(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erf(double);
-double2 __ovld __cnfn erf(double2);
-double3 __ovld __cnfn erf(double3);
-double4 __ovld __cnfn erf(double4);
-double8 __ovld __cnfn erf(double8);
-double16 __ovld __cnfn erf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erf(half);
-half2 __ovld __cnfn erf(half2);
-half3 __ovld __cnfn erf(half3);
-half4 __ovld __cnfn erf(half4);
-half8 __ovld __cnfn erf(half8);
-half16 __ovld __cnfn erf(half16);
-#endif //cl_khr_fp16
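
erfc is complementary to erf, erfc(x) = 1 - erf(x), and computing it directly avoids the cancellation that 1.0f - erf(x) suffers once erf(x) approaches 1. An illustrative normal-tail probability, assuming the standard M_SQRT1_2_F macro:

  // P(X > x) for a standard normal X; erfc keeps precision in the tail.
  float gauss_tail(float x) {
      return 0.5f * erfc(x * M_SQRT1_2_F);
  }
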
-
-/**
- * Compute the base e exponential function of x.
- */
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 2 function.
- */
-float __ovld __cnfn exp2(float);
-float2 __ovld __cnfn exp2(float2);
-float3 __ovld __cnfn exp2(float3);
-float4 __ovld __cnfn exp2(float4);
-float8 __ovld __cnfn exp2(float8);
-float16 __ovld __cnfn exp2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp2(double);
-double2 __ovld __cnfn exp2(double2);
-double3 __ovld __cnfn exp2(double3);
-double4 __ovld __cnfn exp2(double4);
-double8 __ovld __cnfn exp2(double8);
-double16 __ovld __cnfn exp2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp2(half);
-half2 __ovld __cnfn exp2(half2);
-half3 __ovld __cnfn exp2(half3);
-half4 __ovld __cnfn exp2(half4);
-half8 __ovld __cnfn exp2(half8);
-half16 __ovld __cnfn exp2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 10 function.
- */
-float __ovld __cnfn exp10(float);
-float2 __ovld __cnfn exp10(float2);
-float3 __ovld __cnfn exp10(float3);
-float4 __ovld __cnfn exp10(float4);
-float8 __ovld __cnfn exp10(float8);
-float16 __ovld __cnfn exp10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp10(double);
-double2 __ovld __cnfn exp10(double2);
-double3 __ovld __cnfn exp10(double3);
-double4 __ovld __cnfn exp10(double4);
-double8 __ovld __cnfn exp10(double8);
-double16 __ovld __cnfn exp10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp10(half);
-half2 __ovld __cnfn exp10(half2);
-half3 __ovld __cnfn exp10(half3);
-half4 __ovld __cnfn exp10(half4);
-half8 __ovld __cnfn exp10(half8);
-half16 __ovld __cnfn exp10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute e^x - 1.0.
- */
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
-#endif //cl_khr_fp16
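
expm1 is not mere convenience: for x near zero, exp(x) is close to 1 and the subtraction exp(x) - 1.0f cancels most significant bits, whereas expm1 computes the difference directly. A sketch with illustrative names:

  // Relative growth over a short step; accurate where exp(r*dt) - 1.0f is not.
  float growth(float rate, float dt) {
      return expm1(rate * dt);
  }
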
-
-/**
- * Compute absolute value of a floating-point number.
- */
-float __ovld __cnfn fabs(float);
-float2 __ovld __cnfn fabs(float2);
-float3 __ovld __cnfn fabs(float3);
-float4 __ovld __cnfn fabs(float4);
-float8 __ovld __cnfn fabs(float8);
-float16 __ovld __cnfn fabs(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fabs(double);
-double2 __ovld __cnfn fabs(double2);
-double3 __ovld __cnfn fabs(double3);
-double4 __ovld __cnfn fabs(double4);
-double8 __ovld __cnfn fabs(double8);
-double16 __ovld __cnfn fabs(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fabs(half);
-half2 __ovld __cnfn fabs(half2);
-half3 __ovld __cnfn fabs(half3);
-half4 __ovld __cnfn fabs(half4);
-half8 __ovld __cnfn fabs(half8);
-half16 __ovld __cnfn fabs(half16);
-#endif //cl_khr_fp16
-
-/**
- * x - y if x > y, +0 if x is less than or equal to y.
- */
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
-#endif //cl_khr_fp16
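
fdim is the positive difference: the result never drops below +0, which suits slack or overshoot computations. A sketch:

  // fdim(9.0f, 5.0f) == 4.0f; fdim(3.0f, 5.0f) == +0.0f
  float overshoot(float value, float limit) {
      return fdim(value, limit);
  }
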
-
-/**
- * Round to integral value using the round to negative
- * infinity rounding mode.
- */
-float __ovld __cnfn floor(float);
-float2 __ovld __cnfn floor(float2);
-float3 __ovld __cnfn floor(float3);
-float4 __ovld __cnfn floor(float4);
-float8 __ovld __cnfn floor(float8);
-float16 __ovld __cnfn floor(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn floor(double);
-double2 __ovld __cnfn floor(double2);
-double3 __ovld __cnfn floor(double3);
-double4 __ovld __cnfn floor(double4);
-double8 __ovld __cnfn floor(double8);
-double16 __ovld __cnfn floor(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn floor(half);
-half2 __ovld __cnfn floor(half2);
-half3 __ovld __cnfn floor(half3);
-half4 __ovld __cnfn floor(half4);
-half8 __ovld __cnfn floor(half8);
-half16 __ovld __cnfn floor(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns the correctly rounded floating-point
- * representation of the sum of c with the infinitely
- * precise product of a and b. Rounding of
- * intermediate products shall not occur. Edge case
- * behavior is per the IEEE 754-2008 standard.
- */
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
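
Because fma rounds once instead of twice, chaining it is the usual way to accumulate products. A hypothetical 3-component dot product:

  float dot3(float3 u, float3 v) {
      float acc = fma(u.x, v.x, 0.0f);  // one rounding per step
      acc = fma(u.y, v.y, acc);
      return fma(u.z, v.z, acc);
  }
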
-
-/**
- * Returns y if x < y, otherwise it returns x. If one
- * argument is a NaN, fmax() returns the other
- * argument. If both arguments are NaNs, fmax()
- * returns a NaN.
- */
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If one
- * argument is a NaN, fmin() returns the other
- * argument. If both arguments are NaNs, fmin()
- * returns a NaN.
- */
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
-#endif //cl_khr_fp16
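
The mixed vector-scalar overloads bound a whole vector against scalar limits in one call, and the NaN rule above means a NaN lane yields the bound instead of propagating. An illustrative saturate:

  float4 saturate01(float4 v) {
      // A NaN lane becomes 0.0f: fmax(NaN, 0.0f) returns the other argument.
      return fmin(fmax(v, 0.0f), 1.0f);
  }
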
-
-/**
- * Modulus. Returns x - y * trunc (x/y).
- */
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
-#endif //cl_khr_fp16
-
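/* Editor's note: a worked example (not part of the original header) of the
 * identity above: fmod(x, y) == x - y * trunc(x/y). */
__kernel void fmod_demo(__global float *out) {
    // 5.5 / 2.0 = 2.75; trunc(2.75) = 2.0; 5.5 - 2.0 * 2.0 = 1.5
    out[0] = fmod(5.5f, 2.0f);   // 1.5f
    // The result keeps the sign of x: -5.5 - 2.0 * trunc(-2.75) = -1.5
    out[1] = fmod(-5.5f, 2.0f);  // -1.5f
}
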
-/**
- * Returns fmin(x - floor(x), 0x1.fffffep-1f).
- * floor(x) is returned in iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
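/* Editor's note: illustrative values (not part of the original header).
 * Because fract is defined via floor, the result is always in [0, 1),
 * even for negative inputs. */
__kernel void fract_demo(__global float *out) {
    float ip;                     // receives floor(x)
    out[0] = fract(2.25f, &ip);   // 0.25f, ip == 2.0f
    out[1] = fract(-2.25f, &ip);  // 0.75f, ip == -3.0f (floor, not trunc)
}
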
-/**
- * Extract mantissa and exponent from x. For each
- * component the mantissa returned is a float with
- * magnitude in the interval [1/2, 1) or 0. Each
- * component of x equals the returned mantissa * 2^exp.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
-#endif //cl_khr_fp16
-#else
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
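/* Editor's note: a sketch (not part of the original header) of the
 * round-trip identity x == ldexp(frexp(x, &e), e). */
__kernel void frexp_demo(__global float *out, __global int *exp_out) {
    int e;
    float m = frexp(48.0f, &e);  // m == 0.75f, e == 6 (48 = 0.75 * 2^6)
    out[0] = ldexp(m, e);        // reconstructs 48.0f exactly
    exp_out[0] = e;
}
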
-/**
- * Compute the value of the square root of x^2 + y^2
- * without undue overflow or underflow.
- */
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
-#endif //cl_khr_fp16
-
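/* Editor's note: illustrative only (not part of the original header).
 * hypot avoids the intermediate overflow that the naive formula hits. */
__kernel void hypot_demo(__global float *out) {
    float x = 3.0e19f, y = 4.0e19f;
    out[0] = sqrt(x * x + y * y);  // x*x = 9e38 overflows float: +inf
    out[1] = hypot(x, y);          // 5.0e19f, no undue overflow
}
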
-/**
- * Return the exponent as an integer value.
- */
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
-#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Multiply x by 2 to the power n.
- */
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
-#endif //cl_khr_fp16
-
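/* Editor's note: a sketch (not part of the original header) relating the
 * two functions above: ilogb extracts the unbiased exponent that ldexp
 * applies. */
__kernel void ilogb_ldexp_demo(__global int *iout, __global float *fout) {
    iout[0] = ilogb(48.0f);      // 5 (48 = 1.5 * 2^5)
    fout[0] = ldexp(1.5f, 5);    // 48.0f
}
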
-/**
- * Log gamma function. Returns the natural
- * logarithm of the absolute value of the gamma
- * function. The sign of the gamma function is
- * returned in the signp argument of lgamma_r.
- */
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
-#endif //cl_khr_fp16
-#else
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
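/* Editor's note: illustrative values (not part of the original header).
 * For x = -0.5, gamma(x) = -2*sqrt(pi) < 0, so signp receives -1 and the
 * return value is ln|gamma(x)|. */
__kernel void lgamma_r_demo(__global float *out, __global int *sgn) {
    int s;
    out[0] = lgamma_r(-0.5f, &s);  // ~1.2655f == ln(2*sqrt(pi))
    sgn[0] = s;                    // -1
}
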
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn log(float);
-float2 __ovld __cnfn log(float2);
-float3 __ovld __cnfn log(float3);
-float4 __ovld __cnfn log(float4);
-float8 __ovld __cnfn log(float8);
-float16 __ovld __cnfn log(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log(double);
-double2 __ovld __cnfn log(double2);
-double3 __ovld __cnfn log(double3);
-double4 __ovld __cnfn log(double4);
-double8 __ovld __cnfn log(double8);
-double16 __ovld __cnfn log(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log(half);
-half2 __ovld __cnfn log(half2);
-half3 __ovld __cnfn log(half3);
-half4 __ovld __cnfn log(half4);
-half8 __ovld __cnfn log(half8);
-half16 __ovld __cnfn log(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn log2(float);
-float2 __ovld __cnfn log2(float2);
-float3 __ovld __cnfn log2(float3);
-float4 __ovld __cnfn log2(float4);
-float8 __ovld __cnfn log2(float8);
-float16 __ovld __cnfn log2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log2(double);
-double2 __ovld __cnfn log2(double2);
-double3 __ovld __cnfn log2(double3);
-double4 __ovld __cnfn log2(double4);
-double8 __ovld __cnfn log2(double8);
-double16 __ovld __cnfn log2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log2(half);
-half2 __ovld __cnfn log2(half2);
-half3 __ovld __cnfn log2(half3);
-half4 __ovld __cnfn log2(half4);
-half8 __ovld __cnfn log2(half8);
-half16 __ovld __cnfn log2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn log10(float);
-float2 __ovld __cnfn log10(float2);
-float3 __ovld __cnfn log10(float3);
-float4 __ovld __cnfn log10(float4);
-float8 __ovld __cnfn log10(float8);
-float16 __ovld __cnfn log10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log10(double);
-double2 __ovld __cnfn log10(double2);
-double3 __ovld __cnfn log10(double3);
-double4 __ovld __cnfn log10(double4);
-double8 __ovld __cnfn log10(double8);
-double16 __ovld __cnfn log10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log10(half);
-half2 __ovld __cnfn log10(half2);
-half3 __ovld __cnfn log10(half3);
-half4 __ovld __cnfn log10(half4);
-half8 __ovld __cnfn log10(half8);
-half16 __ovld __cnfn log10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e logarithm of (1.0 + x).
- */
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the exponent of x, which is the integral
- * part of log_r | x |, where r is the radix of the
- * floating-point format (2 here).
- */
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * mad approximates a * b + c. Whether or how the
- * product of a * b is rounded and how supernormal or
- * subnormal intermediate products are handled is not
- * defined. mad is intended to be used where speed is
- * preferred over accuracy.
- */
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
-
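/* Editor's note: a sketch (not part of the original header). mad may fuse
 * or round the product in an unspecified way, so use it only where the
 * last few ulps do not matter (e.g. graphics-style interpolation). */
__kernel void mad_demo(__global float *out, float a, float b, float c) {
    out[0] = mad(a, b, c);  // fast, intermediate rounding unspecified
    out[1] = fma(a, b, c);  // correctly rounded fused multiply-add
}
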
-/**
- * Returns x if | x | > | y |, y if | y | > | x |, otherwise
- * fmax(x, y).
- */
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns x if | x | < | y |, y if | y | < | x |, otherwise
- * fmin(x, y).
- */
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
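/* Editor's note: illustrative values (not part of the original header)
 * for the magnitude rules documented above. */
__kernel void minmag_maxmag_demo(__global float *out) {
    out[0] = maxmag(-3.0f, 2.0f);  // -3.0f: larger magnitude wins
    out[1] = minmag(-3.0f, 2.0f);  //  2.0f: smaller magnitude wins
    out[2] = maxmag(-2.0f, 2.0f);  //  2.0f: equal magnitudes fall back to fmax
}
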
-/**
- * Decompose a floating-point number. The modf
- * function breaks the argument x into integral and
- * fractional parts, each of which has the same sign as
- * the argument. It stores the integral part in the object
- * pointed to by iptr.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
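/* Editor's note: illustrative values (not part of the original header).
 * Unlike fract, both parts keep the sign of the argument. */
__kernel void modf_demo(__global float *out) {
    float ip;
    out[0] = modf(-3.75f, &ip);  // -0.75f, ip == -3.0f (trunc, not floor)
}
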
-/**
- * Returns a quiet NaN. The nancode may be placed
- * in the significand of the resulting NaN.
- */
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
-#endif //cl_khr_fp16
-
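/* Editor's note: a sketch (not part of the original header). Payload
 * placement is implementation-defined ("may be placed"), so only the
 * NaN-ness itself is portable to test. */
__kernel void nan_demo(__global int *out) {
    float q = nan(0x7fu);  // quiet NaN; nancode 0x7f is a payload hint
    out[0] = isnan(q);     // 1
    out[1] = (q == q);     // 0: NaN compares unequal to itself
}
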
-/**
- * Computes the next representable
- * floating-point value following x in the direction of
- * y. Thus, if y is less than x, nextafter() returns the
- * largest representable floating-point number less
- * than x.
- */
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
-#endif //cl_khr_fp16
-
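/* Editor's note: illustrative values (not part of the original header).
 * The step upward from 1.0f is FLT_EPSILON = 2^-23; the gap halves just
 * below 1.0f. */
__kernel void nextafter_demo(__global float *out) {
    out[0] = nextafter(1.0f, 2.0f);  // 1.0f + 0x1.0p-23f
    out[1] = nextafter(1.0f, 0.0f);  // 1.0f - 0x1.0p-24f
}
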
-/**
- * Compute x to the power y.
- */
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where y is an integer.
- */
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
-#endif //cl_khr_fp16
-
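/* Editor's note: a sketch (not part of the original header) contrasting
 * the three power functions declared above. */
__kernel void pow_family_demo(__global float *out) {
    out[0] = pow(-2.0f, 3.0f);  // -8.0f: pow allows negative x for integral y
    out[1] = pown(-2.0f, 3);    // -8.0f: y is a true int
    out[2] = powr(-2.0f, 3.0f); // NaN: powr requires x >= 0
}
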
-/**
- * Compute the value r such that r = x - n*y, where n
- * is the integer nearest the exact value of x/y. If there
- * are two integers closest to x/y, n shall be the even
- * one. If r is zero, it is given the same sign as x.
- */
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * The remquo function computes the value r such
- * that r = x - n*y, where n is the integer nearest the
- * exact value of x/y. If there are two integers closest
- * to x/y, n shall be the even one. If r is zero, it is
- * given the same sign as x. This is the same value
- * that is returned by the remainder function.
- * remquo also calculates the lower seven bits of the
- * integral quotient x/y, and gives that value the same
- * sign as x/y. It stores this signed value in the object
- * pointed to by quo.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-#endif //cl_khr_fp16
-#else
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
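
/* Editor's note: a worked example (not part of the original header).
 * 7 / 2 = 3.5 is midway between 3 and 4; the even n = 4 is chosen, so
 * r = 7 - 4*2 = -1 and the low quotient bits are 4. */
__kernel void remquo_demo(__global float *out, __global int *qout) {
    int q;
    out[0] = remquo(7.0f, 2.0f, &q);  // -1.0f, same as remainder(7.0f, 2.0f)
    qout[0] = q;                      // 4
}
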
-/**
- * Round to integral value (using round to nearest
- * even rounding mode) in floating-point format.
- * Refer to section 7.1 for description of rounding
- * modes.
- */
-float __ovld __cnfn rint(float);
-float2 __ovld __cnfn rint(float2);
-float3 __ovld __cnfn rint(float3);
-float4 __ovld __cnfn rint(float4);
-float8 __ovld __cnfn rint(float8);
-float16 __ovld __cnfn rint(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rint(double);
-double2 __ovld __cnfn rint(double2);
-double3 __ovld __cnfn rint(double3);
-double4 __ovld __cnfn rint(double4);
-double8 __ovld __cnfn rint(double8);
-double16 __ovld __cnfn rint(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rint(half);
-half2 __ovld __cnfn rint(half2);
-half3 __ovld __cnfn rint(half3);
-half4 __ovld __cnfn rint(half4);
-half8 __ovld __cnfn rint(half8);
-half16 __ovld __cnfn rint(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power 1/y.
- */
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Return the integral value nearest to x rounding
- * halfway cases away from zero, regardless of the
- * current rounding direction.
- */
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
-#endif //cl_khr_fp16
-
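/* Editor's note: illustrative only (not part of the original header).
 * rint rounds halfway cases to even; round rounds them away from zero. */
__kernel void rint_round_demo(__global float *out) {
    out[0] = rint(2.5f);    // 2.0f (to even)
    out[1] = round(2.5f);   // 3.0f (away from zero)
    out[2] = rint(3.5f);    // 4.0f (to even)
    out[3] = round(-2.5f);  // -3.0f
}
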
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn rsqrt(float);
-float2 __ovld __cnfn rsqrt(float2);
-float3 __ovld __cnfn rsqrt(float3);
-float4 __ovld __cnfn rsqrt(float4);
-float8 __ovld __cnfn rsqrt(float8);
-float16 __ovld __cnfn rsqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rsqrt(double);
-double2 __ovld __cnfn rsqrt(double2);
-double3 __ovld __cnfn rsqrt(double3);
-double4 __ovld __cnfn rsqrt(double4);
-double8 __ovld __cnfn rsqrt(double8);
-double16 __ovld __cnfn rsqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rsqrt(half);
-half2 __ovld __cnfn rsqrt(half2);
-half3 __ovld __cnfn rsqrt(half3);
-half4 __ovld __cnfn rsqrt(half4);
-half8 __ovld __cnfn rsqrt(half8);
-half16 __ovld __cnfn rsqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine.
- */
-float __ovld __cnfn sin(float);
-float2 __ovld __cnfn sin(float2);
-float3 __ovld __cnfn sin(float3);
-float4 __ovld __cnfn sin(float4);
-float8 __ovld __cnfn sin(float8);
-float16 __ovld __cnfn sin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sin(double);
-double2 __ovld __cnfn sin(double2);
-double3 __ovld __cnfn sin(double3);
-double4 __ovld __cnfn sin(double4);
-double8 __ovld __cnfn sin(double8);
-double16 __ovld __cnfn sin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sin(half);
-half2 __ovld __cnfn sin(half2);
-half3 __ovld __cnfn sin(half3);
-half4 __ovld __cnfn sin(half4);
-half8 __ovld __cnfn sin(half8);
-half16 __ovld __cnfn sin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine and cosine of x. The computed sine
- * is the return value and computed cosine is returned
- * in cosval.
- */
-#if defined(__opencl_c_generic_address_space)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
-#endif //cl_khr_fp16
-#else
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
-
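/* Editor's note: a sketch (not part of the original header) of the usual
 * use case: one sincos call per rotation instead of separate sin and cos
 * calls. The kernel name is hypothetical. */
__kernel void rotate2d(__global float2 *pts, float angle) {
    size_t i = get_global_id(0);
    float c;
    float s = sincos(angle, &c);  // s = sin(angle), c = cos(angle)
    float2 p = pts[i];
    pts[i] = (float2)(p.x * c - p.y * s, p.x * s + p.y * c);
}
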
-/**
- * Compute hyperbolic sine.
- */
-float __ovld __cnfn sinh(float);
-float2 __ovld __cnfn sinh(float2);
-float3 __ovld __cnfn sinh(float3);
-float4 __ovld __cnfn sinh(float4);
-float8 __ovld __cnfn sinh(float8);
-float16 __ovld __cnfn sinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinh(double);
-double2 __ovld __cnfn sinh(double2);
-double3 __ovld __cnfn sinh(double3);
-double4 __ovld __cnfn sinh(double4);
-double8 __ovld __cnfn sinh(double8);
-double16 __ovld __cnfn sinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinh(half);
-half2 __ovld __cnfn sinh(half2);
-half3 __ovld __cnfn sinh(half3);
-half4 __ovld __cnfn sinh(half4);
-half8 __ovld __cnfn sinh(half8);
-half16 __ovld __cnfn sinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sin(PI * x).
- */
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn sqrt(float);
-float2 __ovld __cnfn sqrt(float2);
-float3 __ovld __cnfn sqrt(float3);
-float4 __ovld __cnfn sqrt(float4);
-float8 __ovld __cnfn sqrt(float8);
-float16 __ovld __cnfn sqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sqrt(double);
-double2 __ovld __cnfn sqrt(double2);
-double3 __ovld __cnfn sqrt(double3);
-double4 __ovld __cnfn sqrt(double4);
-double8 __ovld __cnfn sqrt(double8);
-double16 __ovld __cnfn sqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sqrt(half);
-half2 __ovld __cnfn sqrt(half2);
-half3 __ovld __cnfn sqrt(half3);
-half4 __ovld __cnfn sqrt(half4);
-half8 __ovld __cnfn sqrt(half8);
-half16 __ovld __cnfn sqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tangent.
- */
-float __ovld __cnfn tan(float);
-float2 __ovld __cnfn tan(float2);
-float3 __ovld __cnfn tan(float3);
-float4 __ovld __cnfn tan(float4);
-float8 __ovld __cnfn tan(float8);
-float16 __ovld __cnfn tan(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tan(double);
-double2 __ovld __cnfn tan(double2);
-double3 __ovld __cnfn tan(double3);
-double4 __ovld __cnfn tan(double4);
-double8 __ovld __cnfn tan(double8);
-double16 __ovld __cnfn tan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tan(half);
-half2 __ovld __cnfn tan(half2);
-half3 __ovld __cnfn tan(half3);
-half4 __ovld __cnfn tan(half4);
-half8 __ovld __cnfn tan(half8);
-half16 __ovld __cnfn tan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic tangent.
- */
-float __ovld __cnfn tanh(float);
-float2 __ovld __cnfn tanh(float2);
-float3 __ovld __cnfn tanh(float3);
-float4 __ovld __cnfn tanh(float4);
-float8 __ovld __cnfn tanh(float8);
-float16 __ovld __cnfn tanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanh(double);
-double2 __ovld __cnfn tanh(double2);
-double3 __ovld __cnfn tanh(double3);
-double4 __ovld __cnfn tanh(double4);
-double8 __ovld __cnfn tanh(double8);
-double16 __ovld __cnfn tanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanh(half);
-half2 __ovld __cnfn tanh(half2);
-half3 __ovld __cnfn tanh(half3);
-half4 __ovld __cnfn tanh(half4);
-half8 __ovld __cnfn tanh(half8);
-half16 __ovld __cnfn tanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tan(PI * x).
- */
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the gamma function.
- */
-float __ovld __cnfn tgamma(float);
-float2 __ovld __cnfn tgamma(float2);
-float3 __ovld __cnfn tgamma(float3);
-float4 __ovld __cnfn tgamma(float4);
-float8 __ovld __cnfn tgamma(float8);
-float16 __ovld __cnfn tgamma(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tgamma(double);
-double2 __ovld __cnfn tgamma(double2);
-double3 __ovld __cnfn tgamma(double3);
-double4 __ovld __cnfn tgamma(double4);
-double8 __ovld __cnfn tgamma(double8);
-double16 __ovld __cnfn tgamma(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tgamma(half);
-half2 __ovld __cnfn tgamma(half2);
-half3 __ovld __cnfn tgamma(half3);
-half4 __ovld __cnfn tgamma(half4);
-half8 __ovld __cnfn tgamma(half8);
-half16 __ovld __cnfn tgamma(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to zero
- * rounding mode.
- */
-float __ovld __cnfn trunc(float);
-float2 __ovld __cnfn trunc(float2);
-float3 __ovld __cnfn trunc(float3);
-float4 __ovld __cnfn trunc(float4);
-float8 __ovld __cnfn trunc(float8);
-float16 __ovld __cnfn trunc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn trunc(double);
-double2 __ovld __cnfn trunc(double2);
-double3 __ovld __cnfn trunc(double3);
-double4 __ovld __cnfn trunc(double4);
-double8 __ovld __cnfn trunc(double8);
-double16 __ovld __cnfn trunc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn trunc(half);
-half2 __ovld __cnfn trunc(half2);
-half3 __ovld __cnfn trunc(half3);
-half4 __ovld __cnfn trunc(half4);
-half8 __ovld __cnfn trunc(half8);
-half16 __ovld __cnfn trunc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cosine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
-
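/* Editor's note: illustrative only (not part of the original header). The
 * half_ variants trade accuracy for speed (a minimum of 10 bits of
 * accuracy) and, for half_cos/half_sin, a restricted input range. */
__kernel void half_cos_demo(__global float *out, __global const float *in) {
    size_t i = get_global_id(0);
    // Caller must guarantee |in[i]| <= 2^16, per the comment above.
    out[i] = half_cos(in[i]);
}
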
-/**
- * Compute x / y.
- */
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x.
- */
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x.
- */
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x.
- */
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
-
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal.
- */
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
-
-/**
- * Compute sine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
-
-/**
- * Compute tangent. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
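
A minimal usage sketch for the half_* group above, which trades accuracy (and,
for the trigonometric entries, argument range) for speed; the kernel name and
buffer layout are hypothetical:

// Reduced-precision inverse distance, built from the half_* declarations above.
__kernel void fast_falloff(__global const float *dist2, __global float *out) {
    size_t i = get_global_id(0);
    out[i] = half_recip(half_sqrt(dist2[i]));
}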
-
-/**
- * Compute cosine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
-
-/**
- * Compute x / y over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
-
-/**
- * Compute natural logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
-
-/**
- * Compute a base 2 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0. The ranges of
- * x and y are implementation-defined. The maximum error
- * is implementation-defined.
- */
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
-
-/**
- * Compute inverse square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
-
-/**
- * Compute sine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
-
-/**
- * Compute square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
-
-/**
- * Compute tangent over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
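
The native_* group goes further than half_*: both the valid range and the
maximum error are implementation-defined, which suits visual effects rather
than numerics. A hedged sketch; the kernel name is hypothetical:

__kernel void shimmer(__global const float *phase, __global float *out) {
    size_t i = get_global_id(0);
    // Fast, approximate damped oscillation using the declarations above.
    out[i] = native_sin(phase[i]) * native_exp(-phase[i]);
}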
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
-
-/**
- * Returns | x |.
- */
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
-
-/**
- * Returns | x - y | without modulo overflow.
- */
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
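
Worked examples for the two groups above, assuming two's-complement values:
abs returns the unsigned type, so even the most negative input is
representable, and abs_diff avoids the wrap that plain subtraction produces.

// abs((char)-128)                 == (uchar)128
// abs_diff((uchar)10, (uchar)250) == (uchar)240   (10 - 250 would wrap to 16)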
-
-/**
- * Returns x + y and saturates the result.
- */
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
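
add_sat clamps to the type's limits instead of wrapping, the behavior usually
wanted for pixel arithmetic. A hedged sketch; the helper name is hypothetical:

uchar4 brighten(uchar4 px) {
    return add_sat(px, (uchar4)(32)); // e.g. 250 + 32 -> 255, not 26
}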
-
-/**
- * Returns (x + y) >> 1. The intermediate sum does
- * not modulo overflow.
- */
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
-
-/**
- * Returns (x + y + 1) >> 1. The intermediate sum
- * does not modulo overflow.
- */
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
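
hadd and rhadd average two values without the intermediate sum overflowing,
e.g. hadd((uchar)200, (uchar)100) == 150 and rhadd((uchar)200, (uchar)101)
== 151 (rhadd rounds up). A hedged sketch; the helper name is hypothetical:

uchar average_px(uchar a, uchar b) {
    return rhadd(a, b); // safe even when a + b exceeds UCHAR_MAX
}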
-
-/**
- * Returns min(max(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
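
The scalar-limit overloads above broadcast minval/maxval across all vector
elements. A hedged one-liner; the helper name is hypothetical:

int4 clamp_demo(int4 v) {
    return clamp(v, 0, 10); // (-5, 3, 9, 12) -> (0, 3, 9, 10)
}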
-
-/**
- * Returns the number of leading 0-bits in x, starting
- * at the most significant bit position.
- */
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
-
-/**
- * Returns the count of trailing 0-bits in x. If x is 0,
- * returns the size in bits of the type of x or
- * component type of x, if x is a vector.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld __cnfn ctz(char x);
-uchar __ovld __cnfn ctz(uchar x);
-char2 __ovld __cnfn ctz(char2 x);
-uchar2 __ovld __cnfn ctz(uchar2 x);
-char3 __ovld __cnfn ctz(char3 x);
-uchar3 __ovld __cnfn ctz(uchar3 x);
-char4 __ovld __cnfn ctz(char4 x);
-uchar4 __ovld __cnfn ctz(uchar4 x);
-char8 __ovld __cnfn ctz(char8 x);
-uchar8 __ovld __cnfn ctz(uchar8 x);
-char16 __ovld __cnfn ctz(char16 x);
-uchar16 __ovld __cnfn ctz(uchar16 x);
-short __ovld __cnfn ctz(short x);
-ushort __ovld __cnfn ctz(ushort x);
-short2 __ovld __cnfn ctz(short2 x);
-ushort2 __ovld __cnfn ctz(ushort2 x);
-short3 __ovld __cnfn ctz(short3 x);
-ushort3 __ovld __cnfn ctz(ushort3 x);
-short4 __ovld __cnfn ctz(short4 x);
-ushort4 __ovld __cnfn ctz(ushort4 x);
-short8 __ovld __cnfn ctz(short8 x);
-ushort8 __ovld __cnfn ctz(ushort8 x);
-short16 __ovld __cnfn ctz(short16 x);
-ushort16 __ovld __cnfn ctz(ushort16 x);
-int __ovld __cnfn ctz(int x);
-uint __ovld __cnfn ctz(uint x);
-int2 __ovld __cnfn ctz(int2 x);
-uint2 __ovld __cnfn ctz(uint2 x);
-int3 __ovld __cnfn ctz(int3 x);
-uint3 __ovld __cnfn ctz(uint3 x);
-int4 __ovld __cnfn ctz(int4 x);
-uint4 __ovld __cnfn ctz(uint4 x);
-int8 __ovld __cnfn ctz(int8 x);
-uint8 __ovld __cnfn ctz(uint8 x);
-int16 __ovld __cnfn ctz(int16 x);
-uint16 __ovld __cnfn ctz(uint16 x);
-long __ovld __cnfn ctz(long x);
-ulong __ovld __cnfn ctz(ulong x);
-long2 __ovld __cnfn ctz(long2 x);
-ulong2 __ovld __cnfn ctz(ulong2 x);
-long3 __ovld __cnfn ctz(long3 x);
-ulong3 __ovld __cnfn ctz(ulong3 x);
-long4 __ovld __cnfn ctz(long4 x);
-ulong4 __ovld __cnfn ctz(ulong4 x);
-long8 __ovld __cnfn ctz(long8 x);
-ulong8 __ovld __cnfn ctz(ulong8 x);
-long16 __ovld __cnfn ctz(long16 x);
-ulong16 __ovld __cnfn ctz(ulong16 x);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
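
Worked examples for the two bit-counting groups above, on an 8-bit value:

// clz((uchar)0x10) == 3   (00010000 has three leading zeros)
// ctz((uchar)0x10) == 4   (four trailing zeros)
// ctz((uchar)0)    == 8   (width of the type when x is 0)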
-
-/**
- * Returns mul_hi(a, b) + c.
- */
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns a * b + c and saturates the result.
- */
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns y if x < y, otherwise it returns x.
- */
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
-
-/**
- * Returns y if y < x, otherwise it returns x.
- */
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
-
-/**
- * Computes x * y and returns the high half of the
- * product of x and y.
- */
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
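
mul_hi pairs naturally with the ordinary low-half product to recover a
full-width result; upsample (declared further below) concatenates the halves.
A hedged sketch; the helper name is hypothetical:

ulong full_mul(uint x, uint y) {
    return upsample(mul_hi(x, y), x * y); // ((ulong)hi << 32) | lo
}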
-
-/**
- * For each element in v, the bits are shifted left by
- * the number of bits given by the corresponding
- * element in i (subject to usual shift modulo rules
- * described in section 6.3). Bits shifted off the left
- * side of the element are shifted back in from the
- * right.
- */
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
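
rotate is a circular shift: bits leaving on the left re-enter on the right,
e.g. rotate((uchar)0x81, (uchar)1) == 0x03 (10000001 -> 00000011).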
-
-/**
- * Returns x - y and saturates the result.
- */
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
-
-/**
- * result[i] = ((short)hi[i] << 8) | lo[i]
- * result[i] = ((ushort)hi[i] << 8) | lo[i]
- */
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
-
-/**
- * result[i] = ((int)hi[i] << 16) | lo[i]
- * result[i] = ((uint)hi[i] << 16) | lo[i]
- */
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
-/**
- * result[i] = ((long)hi[i] << 32) | lo[i]
- * result[i] = ((ulong)hi[i] << 32) | lo[i]
- */
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
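
Worked examples of upsample as hi:lo concatenation, per the formulas above:

// upsample((uchar)0x12, (uchar)0x34)  == (ushort)0x1234
// upsample((ushort)0xABCD, (ushort)0) == (uint)0xABCD0000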
-
-/*
- * popcount(x): returns the number of set bits in x
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
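
Worked examples: popcount counts the set bits of each element, e.g.
popcount((uchar)0xF0) == 4 and popcount((uint)0) == 0.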
-
-/**
- * Multiply two 24-bit integer values x and y and add
- * the 32-bit integer result to the 32-bit integer z.
- * Refer to the definition of mul24 to see how the 24-bit
- * integer multiplication is performed.
- */
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
-
-/**
- * Multiply two 24-bit integer values x and y. x and y
- * are 32-bit integers but only the low 24-bits are used
- * to perform the multiplication. mul24 should only
- * be used when values in x and y are in the range
- * [-2^23, 2^23-1] if x and y are signed integers, and in
- * the range [0, 2^24-1] if x and y are unsigned integers.
- * If x and y are not in this range, the multiplication
- * result is implementation-defined.
- */
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
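-/*
- * Illustrative example (not part of the specification text): for operands
- * within the 24-bit range, mad24(x, y, z) == mul24(x, y) + z.
- *   int p = mul24(1000, 2000);    // 2000000
- *   int q = mad24(1000, 2000, 5); // 2000005
- */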
-
-// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
-
-/**
- * Returns fmin(fmax(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
-#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
-#endif //cl_khr_fp16
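-/*
- * Illustrative example (not part of the specification text): the scalar
- * minval/maxval overloads broadcast across all vector lanes.
- *   float4 v = (float4)(-1.0f, 0.25f, 0.5f, 2.0f);
- *   float4 c = clamp(v, 0.0f, 1.0f); // (0.0f, 0.25f, 0.5f, 1.0f)
- */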
-
-/**
- * Converts radians to degrees, i.e. (180 / PI) *
- * radians.
- */
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
-#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
-#endif //cl_khr_fp16
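-/*
- * Illustrative example (not part of the specification text), assuming the
- * standard M_PI_F macro is available:
- *   float d = degrees(M_PI_F); // approximately 180.0f
- */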
-
-/**
- * Returns y if x < y, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the linear blend of x & y implemented as:
- * x + (y - x) * a
- * a must be a value in the range 0.0 ... 1.0. If a is not
- * in the range 0.0 ... 1.0, the return values are
- * undefined.
- */
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
-#endif //cl_khr_fp16
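-/*
- * Illustrative example (not part of the specification text): a == 0 yields
- * x and a == 1 yields y.
- *   float m = mix(10.0f, 20.0f, 0.25f); // 10 + (20 - 10) * 0.25 == 12.5f
- */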
-
-/**
- * Converts degrees to radians, i.e. (PI / 180) *
- * degrees.
- */
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
-#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x < edge, otherwise it returns 1.0.
- */
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
-#endif //cl_khr_fp16
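-/*
- * Illustrative example (not part of the specification text): the scalar
- * edge overload broadcasts across vector lanes.
- *   float4 s = step(0.5f, (float4)(0.0f, 0.5f, 0.75f, 1.0f));
- *   // s == (0.0f, 1.0f, 1.0f, 1.0f)
- */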
-
-/**
- * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
- * performs smooth Hermite interpolation between 0
- * and 1 when edge0 < x < edge1. This is useful in
- * cases where you would want a threshold function
- * with a smooth transition.
- * This is equivalent to:
- * gentype t;
- * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
- * return t * t * (3 - 2 * t);
- * Results are undefined if edge0 >= edge1 or if x,
- * edge0 or edge1 is a NaN.
- */
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
-#endif //cl_khr_fp16
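-/*
- * Illustrative example (not part of the specification text): at the
- * midpoint t == 0.5, so t * t * (3 - 2 * t) == 0.5.
- *   float s = smoothstep(0.0f, 1.0f, 0.5f); // 0.5f
- */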
-
-/**
- * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
- * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
- */
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
-#endif //cl_khr_fp16
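-/*
- * Illustrative example (not part of the specification text):
- *   float4 s = sign((float4)(-3.0f, -0.0f, 0.0f, 7.5f));
- *   // s == (-1.0f, -0.0f, 0.0f, 1.0f)
- */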
-
-// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
-
-/**
- * Returns the cross product of p0.xyz and p1.xyz. The
- * w component of the float4 result will be 0.0.
- */
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
-#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
-#endif //cl_khr_fp16
-
-/**
- * Compute dot product.
- */
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
-#endif //cl_khr_fp16
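-/*
- * Illustrative example (not part of the specification text): cross and dot
- * on orthogonal unit axes.
- *   float3 ex = (float3)(1.0f, 0.0f, 0.0f);
- *   float3 ey = (float3)(0.0f, 1.0f, 0.0f);
- *   float3 ez = cross(ex, ey); // (0.0f, 0.0f, 1.0f)
- *   float  d  = dot(ex, ey);   // 0.0f
- */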
-
-/**
- * Returns the distance between p0 and p1. This is
- * calculated as length(p0 - p1).
- */
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p, i.e.,
- * sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1.
- */
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
-#endif //cl_khr_fp16
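-/*
- * Illustrative example (not part of the specification text): a 3-4-5
- * triangle.
- *   float3 p = (float3)(3.0f, 4.0f, 0.0f);
- *   float  l = length(p);    // 5.0f
- *   float3 n = normalize(p); // (0.6f, 0.8f, 0.0f), so length(n) == 1.0f
- */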
-
-/**
- * Returns fast_length(p0 - p1).
- */
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p computed as:
- * half_sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1. fast_normalize is computed as:
- * p * half_rsqrt (p.x^2 + p.y^2 + ... )
- * The result shall be within 8192 ulps error from the
- * infinitely precise result of
- * if (all(p == 0.0f))
- * result = p;
- * else
- * result = p / sqrt (p.x^2 + p.y^2 + ...);
- * with the following exceptions:
- * 1) If the sum of squares is greater than FLT_MAX
- * then the floating-point values in the result vector
- * are undefined.
- * 2) If the sum of squares is less than FLT_MIN then
- * the implementation may simply return p.
- * 3) If the device is in "denorms are flushed to zero"
- * mode, individual operand elements with magnitude
- * less than sqrt(FLT_MIN) may be flushed to zero
- * before proceeding with the calculation.
- */
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
-
-/**
- * intn isequal (floatn x, floatn y)
- * Returns the component-wise compare of x == y.
- */
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
-#endif //cl_khr_fp16
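-/*
- * Illustrative example (not part of the specification text): vector
- * relational built-ins return -1 (all bits set) per true lane and 0 per
- * false lane; NaN compares unequal to everything, including itself.
- *   int4 r = isequal((float4)(1.0f, 2.0f, NAN, 4.0f),
- *                    (float4)(1.0f, 0.0f, NAN, 4.0f));
- *   // r == (-1, 0, 0, -1)
- */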
-
-/**
- * Returns the component-wise compare of x != y.
- */
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x > y.
- */
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x >= y.
- */
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x < y.
- */
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x <= y.
- */
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of
- * (x < y) || (x > y).
- */
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for finite value.
- */
-int __ovld __cnfn isfinite(float);
-int2 __ovld __cnfn isfinite(float2);
-int3 __ovld __cnfn isfinite(float3);
-int4 __ovld __cnfn isfinite(float4);
-int8 __ovld __cnfn isfinite(float8);
-int16 __ovld __cnfn isfinite(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isfinite(double);
-long2 __ovld __cnfn isfinite(double2);
-long3 __ovld __cnfn isfinite(double3);
-long4 __ovld __cnfn isfinite(double4);
-long8 __ovld __cnfn isfinite(double8);
-long16 __ovld __cnfn isfinite(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isfinite(half);
-short2 __ovld __cnfn isfinite(half2);
-short3 __ovld __cnfn isfinite(half3);
-short4 __ovld __cnfn isfinite(half4);
-short8 __ovld __cnfn isfinite(half8);
-short16 __ovld __cnfn isfinite(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for infinity value (positive or negative).
- */
-int __ovld __cnfn isinf(float);
-int2 __ovld __cnfn isinf(float2);
-int3 __ovld __cnfn isinf(float3);
-int4 __ovld __cnfn isinf(float4);
-int8 __ovld __cnfn isinf(float8);
-int16 __ovld __cnfn isinf(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isinf(double);
-long2 __ovld __cnfn isinf(double2);
-long3 __ovld __cnfn isinf(double3);
-long4 __ovld __cnfn isinf(double4);
-long8 __ovld __cnfn isinf(double8);
-long16 __ovld __cnfn isinf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isinf(half);
-short2 __ovld __cnfn isinf(half2);
-short3 __ovld __cnfn isinf(half3);
-short4 __ovld __cnfn isinf(half4);
-short8 __ovld __cnfn isinf(half8);
-short16 __ovld __cnfn isinf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a NaN.
- */
-int __ovld __cnfn isnan(float);
-int2 __ovld __cnfn isnan(float2);
-int3 __ovld __cnfn isnan(float3);
-int4 __ovld __cnfn isnan(float4);
-int8 __ovld __cnfn isnan(float8);
-int16 __ovld __cnfn isnan(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnan(double);
-long2 __ovld __cnfn isnan(double2);
-long3 __ovld __cnfn isnan(double3);
-long4 __ovld __cnfn isnan(double4);
-long8 __ovld __cnfn isnan(double8);
-long16 __ovld __cnfn isnan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnan(half);
-short2 __ovld __cnfn isnan(half2);
-short3 __ovld __cnfn isnan(half3);
-short4 __ovld __cnfn isnan(half4);
-short8 __ovld __cnfn isnan(half8);
-short16 __ovld __cnfn isnan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a normal value.
- */
-int __ovld __cnfn isnormal(float);
-int2 __ovld __cnfn isnormal(float2);
-int3 __ovld __cnfn isnormal(float3);
-int4 __ovld __cnfn isnormal(float4);
-int8 __ovld __cnfn isnormal(float8);
-int16 __ovld __cnfn isnormal(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnormal(double);
-long2 __ovld __cnfn isnormal(double2);
-long3 __ovld __cnfn isnormal(double3);
-long4 __ovld __cnfn isnormal(double4);
-long8 __ovld __cnfn isnormal(double8);
-long16 __ovld __cnfn isnormal(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnormal(half);
-short2 __ovld __cnfn isnormal(half2);
-short3 __ovld __cnfn isnormal(half3);
-short4 __ovld __cnfn isnormal(half4);
-short8 __ovld __cnfn isnormal(half8);
-short16 __ovld __cnfn isnormal(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are ordered. isordered() takes
- * arguments x and y, and returns the result
- * isequal(x, x) && isequal(y, y).
- */
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are unordered. isunordered()
- * takes arguments x and y, returning non-zero if x or y
- * is NaN, and zero otherwise.
- */
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for sign bit. The scalar version of the function
- * returns 1 if the sign bit in the float is set, else
- * returns 0. The vector version of the function returns,
- * for each component in floatn, -1 if the sign bit in
- * the float is set, else 0.
- */
-int __ovld __cnfn signbit(float);
-int2 __ovld __cnfn signbit(float2);
-int3 __ovld __cnfn signbit(float3);
-int4 __ovld __cnfn signbit(float4);
-int8 __ovld __cnfn signbit(float8);
-int16 __ovld __cnfn signbit(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn signbit(double);
-long2 __ovld __cnfn signbit(double2);
-long3 __ovld __cnfn signbit(double3);
-long4 __ovld __cnfn signbit(double4);
-long8 __ovld __cnfn signbit(double8);
-long16 __ovld __cnfn signbit(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn signbit(half);
-short2 __ovld __cnfn signbit(half2);
-short3 __ovld __cnfn signbit(half3);
-short4 __ovld __cnfn signbit(half4);
-short8 __ovld __cnfn signbit(half8);
-short16 __ovld __cnfn signbit(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns 1 if the most significant bit in any component
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
-
-/**
- * Returns 1 if the most significant bit in all components
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
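-/*
- * Illustrative example (not part of the specification text): any/all test
- * the MSB of each component, so they compose with relational results.
- *   int4 m = (int4)(-1, 0, -1, 0);
- *   int a = any(m); // 1: at least one MSB is set
- *   int b = all(m); // 0: not every MSB is set
- */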
-
-/**
- * Each bit of the result is the corresponding bit of a if
- * the corresponding bit of c is 0. Otherwise it is the
- * corresponding bit of b.
- */
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
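-/*
- * Illustrative example (not part of the specification text): a bitwise
- * merge equivalent to (a & ~c) | (b & c).
- *   uint r = bitselect(0x00FF00FFu, 0xFFFFFFFFu, 0x0000FFFFu);
- *   // r == 0x00FFFFFFu
- */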
-
-/**
- * For each component of a vector type,
- * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
- * For a scalar type, result = c ? b : a.
- * b and a must have the same type.
- * c must have the same number of elements and bits as a.
- */
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
-
-#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
-#endif //cl_khr_fp16
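-/*
- * Illustrative example (not part of the specification text): unlike
- * bitselect, select picks whole components based on the MSB of each c[i].
- *   int4 c = (int4)(-1, 0, -1, 0);
- *   int4 r = select((int4)(1, 2, 3, 4), (int4)(10, 20, 30, 40), c);
- *   // r == (10, 2, 30, 4)
- */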
-
-// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
-// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
-/**
- * Use generic type gentype to indicate the built-in data types
- * char, uchar, short, ushort, int, uint, long, ulong, float,
- * double or half.
- *
- * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
- *
- * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
- *
- * The address computed as (p + (offset * n)) must be
- * 8-bit aligned if gentype is char, uchar;
- * 16-bit aligned if gentype is short, ushort, half;
- * 32-bit aligned if gentype is int, uint, float;
- * 64-bit aligned if gentype is long, ulong, double.
- */
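-/*
- * Illustrative example (not part of the specification text): the offset is
- * scaled by the vector width n, so consecutive offsets read consecutive
- * vectors. Assuming p points to at least 8 suitably aligned floats:
- *   float4 lo = vload4(0, p); // p[0..3]
- *   float4 hi = vload4(1, p); // p[4..7]
- */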
-
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
-#endif //cl_khr_fp16
-
-#if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
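[Editor's note: a minimal usage sketch for the vloadn overloads above; this kernel is illustrative and not part of the diff. vload4 reads four consecutive floats starting at (p + offset * 4); the pointer only needs sizeof(float) alignment, not float4 alignment.]

__kernel void sum4(__global const float *in, __global float *out) {
    size_t gid = get_global_id(0);
    float4 v = vload4(gid, in);      // reads in[gid*4 .. gid*4+3]
    out[gid] = v.x + v.y + v.z + v.w;
}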
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
-#endif //cl_khr_fp16
-#else
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
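[Editor's note: a minimal sketch of the vstoren counterparts declared above; kernel name and logic are illustrative, not from the diff. vload3/vstore3 pack three elements tightly at (p + offset * 3), avoiding the 4-element padding a plain float3 pointer store would imply.]

__kernel void scale3(__global const float *in, __global float *out, float s) {
    size_t gid = get_global_id(0);
    float3 v = vload3(gid, in);      // reads in[gid*3 .. gid*3+2]
    vstore3(v * s, gid, out);        // writes out[gid*3 .. gid*3+2], tightly packed
}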
-
-/**
- * Read sizeof (half) bytes of data from address
- * (p + offset). The data read is interpreted as a
- * half value. The half value is converted to a
- * float value and the float value is returned.
- * The read address computed as (p + offset)
- * must be 16-bit aligned.
- */
-float __ovld vload_half(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
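[Editor's note: a minimal sketch of the vload_half behavior described above; the kernel is hypothetical. Because the loaded value is widened to float, a kernel can consume half-precision buffers without cl_khr_fp16 arithmetic support — pointers to half are always allowed, only half variables and arithmetic need the extension.]

__kernel void half_to_float(__global const half *in, __global float *out) {
    size_t gid = get_global_id(0);
    out[gid] = vload_half(gid, in);  // reads in[gid], returns it as float
}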
-
-/**
- * Read sizeof (halfn) bytes of data from address
- * (p + (offset * n)). The data read is interpreted
- * as a halfn value. The halfn value read is
- * converted to a floatn value and the floatn
- * value is returned. The read address computed
- * as (p + (offset * n)) must be 16-bit aligned.
- */
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
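[Editor's note: a minimal sketch of vload_halfn as documented above; kernel name is illustrative. vload_half4 reads four halfs from (p + offset * 4) and returns a float4, so the address only needs 16-bit (sizeof(half)) alignment.]

__kernel void dot_half4(__global const half *a, __global const half *b,
                        __global float *out) {
    size_t gid = get_global_id(0);
    out[gid] = dot(vload_half4(gid, a), vload_half4(gid, b));
}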
-
-/**
- * The float value given by data is first
- * converted to a half value using the appropriate
- * rounding mode. The half value is then written
- * to address computed as (p + offset). The
- * address computed as (p + offset) must be 16-
- * bit aligned.
- * vstore_half uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
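[Editor's note: a minimal sketch of the vstore_half family above; the kernel is hypothetical. The _rte/_rtz/_rtp/_rtn suffixes select the float-to-half rounding mode explicitly, while plain vstore_half uses the current mode (round to nearest even by default).]

__kernel void pack_half(__global const float *in, __global half *out) {
    size_t gid = get_global_id(0);
    vstore_half_rtz(in[gid], gid, out);  // convert truncating toward zero, write out[gid]
}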
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode. The halfn value is then written to
- * address computed as (p + (offset * n)). The
- * address computed as (p + (offset * n)) must be
- * 16-bit aligned.
- * vstore_halfn uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
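[Editor's note: a minimal sketch of vstore_halfn as documented above; kernel name is illustrative. vstore_half4 converts a float4 to four halfs and writes them to (p + offset * 4), halving the bandwidth of the equivalent float4 store.]

__kernel void pack_half4(__global const float *in, __global half *out) {
    size_t gid = get_global_id(0);
    vstore_half4(vload4(gid, in), gid, out);
}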
-
-/**
- * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
- * bytes of data from address (p + (offset * n)).
- * The data read is interpreted as a halfn value.
- * The halfn value read is converted to a floatn
- * value and the floatn value is returned.
- * The address computed as (p + (offset * n))
- * must be aligned to sizeof (halfn) bytes.
- * For n = 3, vloada_half3 reads a half3 from
- * address (p + (offset * 4)) and returns a float3.
- * The address computed as (p + (offset * 4))
- * must be aligned to sizeof (half) * 4 bytes.
- */
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //defined(__opencl_c_generic_address_space)
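[Editor's note: a minimal sketch of the n = 3 case called out in the comment above; the kernel is hypothetical. Unlike vload_half3, vloada_half3 indexes in strides of four halfs (the aligned, padded layout), so an element written with vstorea_half3 at index i is read back with vloada_half3(i, p).]

__kernel void read_aligned(__global const half *in, __global float3 *out) {
    size_t gid = get_global_id(0);
    out[gid] = vloada_half3(gid, in);  // reads at in + gid*4, 8-byte aligned
}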
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode.
- * For n = 1, 2, 4, 8 and 16, the halfn value is
- * written to the address computed as (p + (offset
- * * n)). The address computed as (p + (offset *
- * n)) must be aligned to sizeof (halfn) bytes.
- * For n = 3, the half3 value is written to the
- * address computed as (p + (offset * 4)). The
- * address computed as (p + (offset * 4)) must be
- * aligned to sizeof (half) * 4 bytes.
- * vstorea_halfn uses the current rounding
- * mode. The default current rounding mode is
- * round to nearest even.
- */
-#if defined(__opencl_c_generic_address_space)
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-
-#else
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
-
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-/**
- * All work-items in a work-group executing the kernel
- * on a processor must execute this function before any
- * are allowed to continue execution beyond the barrier.
- * This function must be encountered by all work-items in
- * a work-group executing the kernel.
- * If barrier is inside a conditional statement, then all
- * work-items must enter the conditional if any work-item
- * enters the conditional statement and executes the
- * barrier.
- * If barrier is inside a loop, all work-items must execute
- * the barrier for each iteration of the loop before any are
- * allowed to continue execution beyond the barrier.
- * The barrier function also queues a memory fence
- * (reads and writes) to ensure correct ordering of
- * memory operations to local or global memory.
- * The flags argument specifies the memory address space
- * and can be set to a combination of the following literal
- * values.
- * CLK_LOCAL_MEM_FENCE - The barrier function
- * will either flush any variables stored in local memory
- * or queue a memory fence to ensure correct ordering of
- * memory operations to local memory.
- * CLK_GLOBAL_MEM_FENCE - The barrier function
- * will queue a memory fence to ensure correct ordering
- * of memory operations to global memory. This can be
- * useful when work-items, for example, write to buffer or
- * image objects and then want to read the updated data.
- */
-
-void __ovld __conv barrier(cl_mem_fence_flags flags);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
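A minimal sketch of the pattern the barrier comment above describes (the kernel and buffer names are illustrative, and a power-of-two local size is assumed): every work-item stores into local memory, and each barrier(CLK_LOCAL_MEM_FENCE) is reached by the whole work-group, never hidden behind a divergent branch.

// Hypothetical work-group sum: barrier() sits outside the `if`, so all
// work-items execute it on every loop iteration, as the comment requires.
__kernel void wg_sum(__global const float *in, __global float *out,
                     __local float *scratch) {
    size_t lid = get_local_id(0);
    scratch[lid] = in[get_global_id(0)];
    barrier(CLK_LOCAL_MEM_FENCE);            // all stores to scratch visible

    for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
        if (lid < s)
            scratch[lid] += scratch[lid + s];
        barrier(CLK_LOCAL_MEM_FENCE);        // reached by every work-item
    }
    if (lid == 0)
        out[get_group_id(0)] = scratch[0];
}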
-
-// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
-
-/**
- * Orders loads and stores of a work-item
- * executing a kernel. This means that loads
- * and stores preceding the mem_fence will
- * be committed to memory before any loads
- * and stores following the mem_fence.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Read memory barrier that orders only
- * loads.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld read_mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Write memory barrier that orders only
- * stores.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld write_mem_fence(cl_mem_fence_flags flags);
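A sketch of how these fences differ from barrier() (names are illustrative): mem_fence and its read/write variants only order the issuing work-item's own memory operations; they do not make other work-items wait.

// Hypothetical publish pattern: the payload store is committed before the
// flag store within this work-item. A consumer polling the flag would pair
// this with read_mem_fence() before reading the payload.
__kernel void publish(__global int *payload, volatile __global int *flag) {
    size_t gid = get_global_id(0);
    payload[gid] = 42;                        // payload first
    write_mem_fence(CLK_GLOBAL_MEM_FENCE);    // order store -> store
    flag[gid] = 1;                            // then publish
}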
-
-// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-
-#if defined(__opencl_c_generic_address_space)
-cl_mem_fence_flags __ovld get_fence(const void *ptr);
-cl_mem_fence_flags __ovld get_fence(void *ptr);
-
-/**
- * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
- * and checked in Sema since they should be declared as
- *   addr gentype* to_addr (gentype*);
- * where gentype is a builtin or user-defined type.
- */
-
-#endif //defined(__opencl_c_generic_address_space)
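A sketch of what get_fence() is for, assuming the generic address space is available (the helper and parameter names are hypothetical): when a pointer's address space is only known at run time, get_fence() picks the matching fence flags.

// Hypothetical helper: p may point to __local or __global storage;
// get_fence(p) returns the cl_mem_fence_flags covering whichever space
// it actually is, so the fence is neither too weak nor too broad.
void store_then_fence(int *p, size_t i, int v) {
    p[i] = v;
    mem_fence(get_fence(p));   // CLK_LOCAL_MEM_FENCE or CLK_GLOBAL_MEM_FENCE
}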
-
-// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
-
-/**
- * event_t async_work_group_copy (
- * __global gentype *dst,
- * const __local gentype *src,
- * size_t num_elements,
- * event_t event)
- * Perform an async copy of num_elements
- * gentype elements from src to dst. The async
- * copy is performed by all work-items in a work-group
- * and this built-in function must therefore
- * be encountered by all work-items in a work-group
- * executing the kernel with the same
- * argument values; otherwise the results are
- * undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the async_work_group_copy with
- * a previous async copy allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in the event argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp16
-
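A minimal sketch of the two-step usage the comment above prescribes (kernel and buffer names are illustrative): every work-item in the group issues the same copy, then the group waits on the returned event before touching the staged data.

// Hypothetical tile staging: the whole work-group copies one tile of
// `in` into local memory, waits, then computes from the local copy.
__kernel void stage_tile(__global const float *in, __global float *out,
                         __local float *tile) {
    size_t base = get_group_id(0) * get_local_size(0);
    event_t e = async_work_group_copy(tile, in + base,
                                      get_local_size(0), 0);
    wait_group_events(1, &e);                 // also releases e
    out[base + get_local_id(0)] = 2.0f * tile[get_local_id(0)];
}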
-/**
- * Perform an async gather of num_elements
- * gentype elements from src to dst. The
- * src_stride is the stride in elements for each
- * gentype element read from src. The dst_stride
- * is the stride in elements for each gentype
- * element written to dst. The async gather is
- * performed by all work-items in a work-group.
- * This built-in function must therefore be
- * encountered by all work-items in a work-group
- * executing the kernel with the same argument
- * values; otherwise the results are undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the
- * async_work_group_strided_copy with a
- * previous async copy allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in the event argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp16
-
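A sketch of the strided gather described above, assuming a row-major matrix (the pitch, col, and rows parameters are illustrative): with src_stride set to the row pitch, consecutive local elements come from consecutive rows of one column.

// Hypothetical column gather: element k of `column` is read from
// matrix[col + k * pitch], i.e. row k of column `col`.
__kernel void gather_column(__global const float *matrix,
                            __local float *column,
                            uint pitch, uint col, uint rows) {
    event_t e = async_work_group_strided_copy(column, matrix + col,
                                              (size_t)rows,
                                              (size_t)pitch, 0);
    wait_group_events(1, &e);
    // column[0..rows-1] is now safe for the whole work-group to read.
}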
-/**
- * Wait for events that identify the
- * async_work_group_copy operations to
- * complete. The event objects specified in
- * event_list will be released after the wait is
- * performed.
- * This function must be encountered by all work-items
- * in a work-group executing the kernel with
- * the same num_events and event objects specified
- * in event_list; otherwise the results are undefined.
- */
-void __ovld wait_group_events(int num_events, event_t *event_list);
-
-/**
- * Prefetch num_elements * sizeof(gentype)
- * bytes into the global cache. The prefetch
- * instruction is applied to a work-item in a work-group
- * and does not affect the functional
- * behavior of the kernel.
- */
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
-#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
-#endif // cl_khr_fp16
-
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old + val) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old) stored at location pointed by p.
- * Compute (old - val) and store result at location pointed by p. The function
- * returns old.
- */
-int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Swaps the old value stored at location p
- * with the new value given by val. Returns the
- * old value.
- */
-int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile __global float *p, float val);
-float __ovld atomic_xchg(volatile __local float *p, float val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile float *p, float val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_xchg(volatile __global long *p, long val);
-long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old + 1) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old - 1) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old == cmp) ? val : old and store result at
- * location pointed by p. The function
- * returns old.
- */
-int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
-long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * min(old, val) and store minimum value at
- * location pointed by p. The function
- * returns old.
- */
-int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * max(old, val) and store maximum value at
- * location pointed by p. The function
- * returns old.
- */
-int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old & val) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old | val) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at location pointed by p. Compute
- * (old ^ val) and store result at location
- * pointed by p. The function returns old.
- */
-int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
-#endif
-
-// OpenCL v2.0 s6.13.11 - Atomics Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-
-// atomic_init()
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_init(volatile __global atomic_int *object, int value);
-void __ovld atomic_init(volatile __local atomic_int *object, int value);
-void __ovld atomic_init(volatile __global atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __local atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __global atomic_float *object, float value);
-void __ovld atomic_init(volatile __local atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile __global atomic_long *object, long value);
-void __ovld atomic_init(volatile __local atomic_long *object, long value);
-void __ovld atomic_init(volatile __global atomic_ulong *object, ulong value);
-void __ovld atomic_init(volatile __local atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile __global atomic_double *object, double value);
-void __ovld atomic_init(volatile __local atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
-// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
-
-// atomic_fetch()
-// OpenCL v2.0 s6.13.11.7.5:
-// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_add(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_sub(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_or(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_xor(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_and(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_min(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_max(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max(volatile __local atomic_uint *object, uint operand);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_add(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_sub(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_sub(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_or(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_or(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_xor(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_xor(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_and(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_and(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_min(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_min(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_max(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_max(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
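// Editorial sketch, not part of the header or the diff: how a kernel might
// use the 64-bit fetch overloads declared above. Assumes the device reports
// both cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics; the
// kernel name and parameters are hypothetical.
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
kernel void set_flag_bits(volatile global atomic_ulong *flags, ulong mask) {
  // A device-scoped relaxed OR is enough for a pure flag accumulator.
  atomic_fetch_or_explicit(flags, mask, memory_order_relaxed,
                           memory_scope_device);
}
#endif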
-// The functionality added by cl_ext_float_atomics extension
-#if defined(cl_ext_float_atomics)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
-void __ovld atomic_store(volatile __global atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __global atomic_half *object);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __global atomic_half *object,
-                            half operand);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile __local atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
-                                  half operand, memory_order order,
-                                  memory_scope scope);
-half __ovld atomic_load(volatile __local atomic_half *object);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __local atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
-                                     half operand, memory_order order,
-                                     memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_load_store) &&                   \
-    defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
-                                  memory_order order, memory_scope scope);
-half __ovld atomic_load(volatile atomic_half *object);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
-                                 memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
-                                     memory_order order, memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
-       // defined(__opencl_c_ext_fp16_local_atomic_load_store)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_max(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_max(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp16_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_max(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_max(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp32_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_max(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                      \
-    defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_max(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) &&                \
-    defined(__opencl_c_ext_fp64_local_atomic_min_max)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add)
-half __ovld atomic_fetch_add(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __global atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_sub(volatile __local atomic_half *object,
-                             half operand);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp16_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_sub(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
-                                      half operand, memory_order order,
-                                      memory_scope scope);
-#endif // defined(__opencl_c_ext_fp16_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp16_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add)
-float __ovld atomic_fetch_add(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __global atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_sub(volatile __local atomic_float *object,
-                              float operand);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp32_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_sub(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
-                                       float operand, memory_order order,
-                                       memory_scope scope);
-#endif // defined(__opencl_c_ext_fp32_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp32_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add)
-double __ovld atomic_fetch_add(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __global atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_sub(volatile __local atomic_double *object,
-                               double operand);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#if defined(__opencl_c_ext_fp64_global_atomic_add) &&                          \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_sub(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
-                                        double operand, memory_order order,
-                                        memory_scope scope);
-#endif // defined(__opencl_c_ext_fp64_global_atomic_add) &&                    \
-    defined(__opencl_c_ext_fp64_local_atomic_add)
-
-#endif // cl_ext_float_atomics
-
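// Editorial sketch, not part of the header or the diff: a minimal use of the
// cl_ext_float_atomics fetch-add overloads declared above, assuming the
// compiler defines __opencl_c_ext_fp32_global_atomic_add. The kernel name
// and parameters are hypothetical.
#if defined(__opencl_c_ext_fp32_global_atomic_add)
kernel void accumulate(volatile global atomic_float *acc,
                       global const float *in) {
  // Relaxed, device-scoped add; the extension makes no ordering promises
  // beyond the chosen memory_order.
  atomic_fetch_add_explicit(acc, in[get_global_id(0)],
                            memory_order_relaxed, memory_scope_device);
}
#endif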
-// atomic_store()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store(volatile __global atomic_int *object, int desired);
-void __ovld atomic_store(volatile __local atomic_int *object, int desired);
-void __ovld atomic_store(volatile __global atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __local atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __global atomic_float *object, float desired);
-void __ovld atomic_store(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_double *object, double desired);
-void __ovld atomic_store(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_long *object, long desired);
-void __ovld atomic_store(volatile __local atomic_long *object, long desired);
-void __ovld atomic_store(volatile __global atomic_ulong *object, ulong desired);
-void __ovld atomic_store(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
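// Editorial sketch, not part of the header or the diff: publishing a value
// with the explicit store overloads above. Assumes the OpenCL C 3.0
// address-space overloads are available; names are hypothetical.
kernel void publish(volatile global atomic_int *flag, global int *data) {
  data[0] = 42; // plain write
  // The release store makes the plain write visible to any work-item that
  // later performs an acquire load of the flag.
  atomic_store_explicit(flag, 1, memory_order_release, memory_scope_device);
}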
-// atomic_load()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load(volatile atomic_int *object);
-uint __ovld atomic_load(volatile atomic_uint *object);
-float __ovld atomic_load(volatile atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load(volatile __global atomic_int *object);
-int __ovld atomic_load(volatile __local atomic_int *object);
-uint __ovld atomic_load(volatile __global atomic_uint *object);
-uint __ovld atomic_load(volatile __local atomic_uint *object);
-float __ovld atomic_load(volatile __global atomic_float *object);
-float __ovld atomic_load(volatile __local atomic_float *object);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile __global atomic_double *object);
-double __ovld atomic_load(volatile __local atomic_double *object);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile __global atomic_long *object);
-long __ovld atomic_load(volatile __local atomic_long *object);
-ulong __ovld atomic_load(volatile __global atomic_ulong *object);
-ulong __ovld atomic_load(volatile __local atomic_ulong *object);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order, memory_scope scope);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order, memory_scope scope);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order, memory_scope scope);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
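// Editorial sketch, not part of the header or the diff: the consumer side of
// the publish pattern, using the explicit load overloads above. Assumes the
// OpenCL C 3.0 address-space overloads; names are hypothetical.
kernel void consume(volatile global atomic_int *flag,
                    global const int *data, global int *out) {
  // The acquire load pairs with the producer's release store, so data[0]
  // written before the flag was set is visible here.
  if (atomic_load_explicit(flag, memory_order_acquire,
                           memory_scope_device) != 0)
    out[0] = data[0];
}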
-// atomic_exchange()
-
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange(volatile __global atomic_int *object, int desired);
-int __ovld atomic_exchange(volatile __local atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile __global atomic_uint *object, uint desired);
-uint __ovld atomic_exchange(volatile __local atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile __global atomic_float *object, float desired);
-float __ovld atomic_exchange(volatile __local atomic_float *object, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile __global atomic_double *object, double desired);
-double __ovld atomic_exchange(volatile __local atomic_double *object, double desired);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile __global atomic_long *object, long desired);
-long __ovld atomic_exchange(volatile __local atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile __global atomic_ulong *object, ulong desired);
-ulong __ovld atomic_exchange(volatile __local atomic_ulong *object, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
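// Editorial sketch, not part of the header or the diff: claiming a one-shot
// token with the exchange overloads above. Assumes the OpenCL C 3.0
// address-space overloads; the kernel and its parameters are hypothetical.
kernel void claim_once(volatile global atomic_int *token, global int *winner) {
  // token starts at 1; exactly one work-item observes the old value 1.
  if (atomic_exchange_explicit(token, 0, memory_order_acq_rel,
                               memory_scope_device) == 1)
    winner[0] = (int)get_global_id(0);
}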
-// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
-#endif
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __private float *expected, float desired);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __private double *expected, double desired);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
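A usage sketch, not part of the header: as in C11, these builtins write the observed value back through *expected when the comparison fails, so a retry loop turns compare-exchange into an arbitrary read-modify-write. The helper name atomic_fmax below is hypothetical, and it assumes __opencl_c_generic_address_space and device-scope atomics are available.

    // Hypothetical helper: lock-free floating-point maximum.
    // On CAS failure, 'expected' is refreshed with the value
    // currently stored in *acc, so the loop re-tests it.
    void atomic_fmax(volatile __global atomic_float *acc, float val) {
      float expected = atomic_load_explicit(acc, memory_order_relaxed,
                                            memory_scope_device);
      while (expected < val &&
             !atomic_compare_exchange_weak_explicit(
                 acc, &expected, val,
                 memory_order_relaxed, memory_order_relaxed,
                 memory_scope_device)) {
        // retry with the refreshed 'expected'
      }
    }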
-
-// atomic_flag_test_and_set() and atomic_flag_clear()
-#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *object);
-bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __global atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __local atomic_flag *object);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_atomic_scope_device)
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__opencl_c_atomic_scope_device)
-
-#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-#endif //defined(__opencl_c_generic_address_space)
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
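A common, if hazardous, use of atomic_flag is a device-scope spin lock. The sketch below is hypothetical and only uses the overloads declared above; note that work-items in the same subgroup spinning on one flag can deadlock on lockstep hardware, so this pattern is usually restricted to one work-item per work-group.

    // Hypothetical spin lock. atomic_flag_test_and_set returns the
    // flag's previous state, so 'true' means the lock was already held.
    void lock(volatile __global atomic_flag *f) {
      while (atomic_flag_test_and_set_explicit(f, memory_order_acquire,
                                               memory_scope_device))
        ; // spin until we observe the flag previously clear
    }
    void unlock(volatile __global atomic_flag *f) {
      atomic_flag_clear_explicit(f, memory_order_release,
                                 memory_scope_device);
    }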
-
-// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
-
-/**
- * The shuffle and shuffle2 built-in functions construct
- * a permutation of elements from one or two input
- * vectors (which must be of the same type),
- * returning a vector with the same element type as the
- * input and the same length as the shuffle mask.
- * The size of each element in the mask must match the
- * size of each element in the result. For shuffle, only
- * the ilogb(2m-1) least significant bits of each mask
- * element are considered. For shuffle2, only the
- * ilogb(2m-1)+1 least significant bits of each mask
- * element are considered. Other bits in the mask shall
- * be ignored.
- * The elements of the input vectors are numbered from
- * left to right across one or both of the vectors. For this
- * purpose, the number of elements in a vector is given
- * by vec_step(gentypem). The shuffle mask operand
- * specifies, for each element of the result vector, which
- * element of the one or two input vectors supplies that
- * result element.
- * Examples:
- * uint4 mask = (uint4)(3, 2, 1, 0);
- * float4 a;
- * float4 r = shuffle(a, mask);
- * // r.s0123 = a.wzyx
- *
- * uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
- * float4 a, b;
- * float8 r = shuffle2(a, b, mask);
- * // r.s0123 = a.xyzw
- * // r.s4567 = b.xyzw
- *
- * uint4 mask;
- * float8 a;
- * float4 b;
- * b = shuffle(a, mask); // valid
- * Examples that are not valid:
- * uint8 mask;
- * short16 a;
- * short8 b;
- * b = shuffle(a, mask); // not valid: uint mask elements (4 bytes)
- *                       // do not match short result elements (2 bytes)
- */
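One more worked sketch beyond the comment's examples (a hypothetical function, not part of the header): because shuffle2 mask values 0..m-1 index the first input and m..2m-1 the second, interleaving two vectors is a single call.

    // Hypothetical: interleave two float4s into a float8.
    // Mask values 0-3 pick from a, 4-7 pick from b.
    float8 interleave(float4 a, float4 b) {
      uint8 mask = (uint8)(0, 4, 1, 5, 2, 6, 3, 7);
      return shuffle2(a, b, mask); // a.x,b.x,a.y,b.y,a.z,b.z,a.w,b.w
    }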
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
-#endif //cl_khr_fp16
-
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
-#endif //cl_khr_fp16
-
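-// Editor's illustration (hypothetical kernel, not part of the original
-// header): a minimal sketch of how the shuffle/shuffle2 builtins declared
-// above are used. Each mask component selects one element of x, or of the
-// concatenation of x and y in the shuffle2 case; only the low-order bits of
-// each mask component are significant.
-__kernel void shuffle_demo(__global float4 *out) {
-    float4 a = (float4)(0.0f, 1.0f, 2.0f, 3.0f);
-    float4 b = (float4)(4.0f, 5.0f, 6.0f, 7.0f);
-    out[0] = shuffle(a, (uint4)(3, 2, 1, 0));     // yields (3.0, 2.0, 1.0, 0.0)
-    out[1] = shuffle2(a, b, (uint4)(0, 4, 1, 5)); // yields (0.0, 4.0, 1.0, 5.0)
-}
-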
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-/**
- * Use the coordinate (coord.xy) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (coord.x, coord.y, coord.z) to do
- * an element lookup in the 3D image object specified
- * by image. coord.w is ignored.
- *
- * Use the coordinate (coord.z) to index into the
- * 2D image array object specified by image_array
- * and (coord.x, coord.y) to do an element lookup in
- * the selected 2D image.
- *
- * Use the coordinate (coord) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.y) to index into the
- * 1D image array object specified by image_array
- * and (coord.x) to do an element lookup in
- * the selected 1D image.
- *
- * Use the coordinate (coord.xy) and sample to do an
- * element lookup in the 2D multi-sample image specified
- * by image.
- *
- * Use coord.xy and sample to do an element
- * lookup in the 2D multi-sample image layer
- * identified by index coord.z in the 2D multi-sample
- * image array specified by image.
- *
- * For mipmap images, use the mip-level specified by
- * the Level-of-Detail (lod) or use gradients for LOD
- * computation.
- *
- * read_imagef returns floating-point values in the
- * range [0.0 ... 1.0] for image objects created with
- * image_channel_data_type set to one of the predefined
- * packed formats, CL_UNORM_INT8, or
- * CL_UNORM_INT16.
- *
- * read_imagef returns floating-point values in the
- * range [-1.0 ... 1.0] for image objects created with
- * image_channel_data_type set to CL_SNORM_INT8
- * or CL_SNORM_INT16.
- *
- * read_imagef returns floating-point values for image
- * objects created with image_channel_data_type set to
- * CL_HALF_FLOAT or CL_FLOAT.
- *
- * read_imagei and read_imageui return
- * unnormalized signed integer and unsigned integer
- * values respectively. Each channel will be stored in a
- * 32-bit integer.
- *
- * read_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imagei
- * are undefined.
- *
- * read_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imageui
- * are undefined.
- *
- * The read_image{i|ui} calls support a nearest filter
- * only. The filter_mode specified in sampler
- * must be set to CLK_FILTER_NEAREST; otherwise
- * the values returned are undefined.
- *
- * The read_image{f|i|ui} calls that take
- * integer coordinates must use a sampler with
- * normalized coordinates set to
- * CLK_NORMALIZED_COORDS_FALSE and
- * addressing mode set to
- * CLK_ADDRESS_CLAMP_TO_EDGE,
- * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
- * otherwise the values returned are undefined.
- *
- * Values returned by read_imagef for image objects
- * with image_channel_data_type values not specified
- * in the description above are undefined.
- */
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
-
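-// Editor's illustration (hypothetical kernel and sampler, not part of the
-// original header): a sampler-based 2D read per the description above. The
-// integer-coordinate overload requires unnormalized coordinates and
-// CLK_FILTER_NEAREST, with a clamping or CLK_ADDRESS_NONE addressing mode.
-const sampler_t demo_sampler = CLK_NORMALIZED_COORDS_FALSE |
-                               CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
-__kernel void copy2d(read_only image2d_t src, __global float4 *dst, int width) {
-    int2 p = (int2)(get_global_id(0), get_global_id(1));
-    dst[p.y * width + p.x] = read_imagef(src, demo_sampler, p);
-}
-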
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
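-// Editor's illustration (hypothetical kernel, assumes cl_khr_mipmap_image):
-// an explicit-LOD read as declared above. The host supplies the coordinate,
-// the level of detail, and a sampler configured consistently with them
-// (normalized coordinates are typical for mip sampling).
-#if defined(cl_khr_mipmap_image)
-__kernel void lod_sample(read_only image2d_t src, sampler_t smp,
-                         __global float4 *out, float2 coord, float lod) {
-    out[get_global_id(0)] = read_imagef(src, smp, coord, lod);
-}
-#endif //cl_khr_mipmap_image
-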
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-/**
- * Sampler-less Image Access
- */
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
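-// Editor's illustration (hypothetical kernel, not part of the original
-// header): a sampler-less read per the section above. Coordinates are
-// unnormalized and filtering behaves as CLK_FILTER_NEAREST with
-// CLK_ADDRESS_NONE, so the caller must keep the coordinates in bounds.
-__kernel void load2d(read_only image2d_t src, __global float4 *dst, int width) {
-    int2 p = (int2)(get_global_id(0), get_global_id(1));
-    dst[p.y * width + p.x] = read_imagef(src, p);
-}
-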
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
-/**
- * Sampler-less Image Access
- */
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-#endif //cl_khr_fp16
-
-// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by image.
- * (coord.x, coord.y) are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by index
- * (coord.z) of the 2D image array object image_array.
- * (coord.x, coord.y) are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width
- * - 1, and 0 ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord) in the 1D image (buffer) object specified by image.
- * coord is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x) in the 1D image object specified by index
- * (coord.y) of the 1D image array object image_array.
- * coord.x is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
- * coord.x & coord.y are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * For mipmap images, use the mip-level specified by lod.
- *
- * Appropriate data format conversion to the specified
- * image format is done before writing the color value.
- *
- * write_imagef can only be used with image objects
- * created with image_channel_data_type set to one of
- * the pre-defined packed formats or set to
- * CL_SNORM_INT8, CL_UNORM_INT8,
- * CL_SNORM_INT16, CL_UNORM_INT16,
- * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
- * format conversion will be done to convert channel
- * data from a floating-point value to the actual data
- * format in which the channels are stored.
- *
- * write_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- *
- * write_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- *
- * The behavior of write_imagef, write_imagei and
- * write_imageui is undefined for image objects created
- * with image_channel_data_type values not specified in
- * the description above, or for coordinate values
- * outside the ranges given above.
- */
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
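-// Editor's illustration (hypothetical kernel, not part of the original
-// header): a minimal write per the description above. The float4 value is
-// converted to the image's channel data type on write, and the coordinate
-// must be within the image bounds.
-__kernel void fill2d(write_only image2d_t dst, float4 value) {
-    int2 p = (int2)(get_global_id(0), get_global_id(1));
-    write_imagef(dst, p, value);
-}
-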
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //defined(cl_khr_mipmap_image_writes)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-
-// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
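-// Editor's illustration (hypothetical kernel, requires OpenCL 2.0 read_write
-// images): an in-place update. Each work-item reads its own texel before
-// overwriting it; a fence would only be needed to read back values written
-// earlier in the same kernel.
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-__kernel void scale_inplace(read_write image2d_t img, float gain) {
-    int2 p = (int2)(get_global_id(0), get_global_id(1));
-    write_imagef(img, p, read_imagef(img, p) * gain);
-}
-#endif
-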
-// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin
-// functions has no access qualifier and therefore defaults to read_only. Image
-// query builtin functions with a write_only image argument should also be declared.
-
-/**
- * Return the image width in pixels.
- */
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image height in pixels.
- */
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_height(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
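// Editorial usage sketch, not part of the header: the scalar width/height
// queries above pair naturally with a bounds check in a 2D kernel.
__kernel void fill_if_inside(write_only image2d_t img) {
  int2 pos = (int2)((int)get_global_id(0), (int)get_global_id(1));
  if (pos.x < get_image_width(img) && pos.y < get_image_height(img))
    write_imagef(img, pos, (float4)(1.0f));
}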
-/**
- * Return the image depth in pixels.
- */
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
-
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
-#endif //cl_khr_3d_image_writes
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-/**
- * Return the number of image mip levels.
- */
-
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
-#endif //cl_khr_3d_image_writes
-
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
-
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
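// Editorial sketch, assuming cl_khr_mipmap_image as guarded above: clamp a
// caller-supplied level of detail to the levels the image actually carries.
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
__kernel void clamp_lod(read_only image2d_t img, __global int *lod) {
  int levels = get_image_num_mip_levels(img);
  *lod = clamp(*lod, 0, levels - 1);
}
#endif
#endif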
-/**
- * Return the channel data type. Valid values are:
- * CLK_SNORM_INT8
- * CLK_SNORM_INT16
- * CLK_UNORM_INT8
- * CLK_UNORM_INT16
- * CLK_UNORM_SHORT_565
- * CLK_UNORM_SHORT_555
- * CLK_UNORM_SHORT_101010
- * CLK_SIGNED_INT8
- * CLK_SIGNED_INT16
- * CLK_SIGNED_INT32
- * CLK_UNSIGNED_INT8
- * CLK_UNSIGNED_INT16
- * CLK_UNSIGNED_INT32
- * CLK_HALF_FLOAT
- * CLK_FLOAT
- */
-
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
-#endif //cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image channel order. Valid values are:
- * CLK_A
- * CLK_R
- * CLK_Rx
- * CLK_RG
- * CLK_RGx
- * CLK_RA
- * CLK_RGB
- * CLK_RGBx
- * CLK_RGBA
- * CLK_ARGB
- * CLK_BGRA
- * CLK_INTENSITY
- * CLK_LUMINANCE
- */
-
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
-#endif //cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
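// Editorial sketch, not part of the header: the two queries above return the
// CLK_* constants listed, so a kernel can specialize on the image format.
__kernel void describe_format(read_only image2d_t img, __global int *out) {
  out[0] = (get_image_channel_data_type(img) == CLK_FLOAT) ? 1 : 0;
  out[1] = (get_image_channel_order(img) == CLK_RGBA) ? 1 : 0;
}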
-/**
- * Return the 2D image width and height as an int2
- * type. The width is returned in the x component, and
- * the height in the y component.
- */
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the 3D image width, height, and depth as an
- * int4 type. The width is returned in the x
- * component, height in the y component, depth in the z
- * component, and the w component is 0.
- */
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
-#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
-#endif //cl_khr_3d_image_writes
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
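// Editorial sketch: get_image_dim folds the width/height queries above into a
// single int2, which keeps the usual bounds check to one comparison per axis.
__kernel void copy_pixel(read_only image2d_t src, write_only image2d_t dst) {
  const sampler_t smp =
      CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
  int2 pos = (int2)((int)get_global_id(0), (int)get_global_id(1));
  int2 dim = get_image_dim(src);            // width in .x, height in .y
  if (pos.x < dim.x && pos.y < dim.y)
    write_imagef(dst, pos, read_imagef(src, smp, pos));
}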
-/**
- * Return the image array size.
- */
-
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
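// Editorial sketch: one work-item per (x, y) texel, iterating the layer count
// reported by get_image_array_size; write_imagef takes (x, y, layer, 0) here.
__kernel void clear_layers(write_only image2d_array_t arr) {
  int x = (int)get_global_id(0), y = (int)get_global_id(1);
  int layers = (int)get_image_array_size(arr);
  for (int l = 0; l < layers; ++l)
    write_imagef(arr, (int4)(x, y, l, 0), (float4)(0.0f));
}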
-/**
- * Return the number of samples associated with the image.
- */
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
-
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#endif //cl_khr_gl_msaa_sharing
-
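// Editorial sketch, assuming cl_khr_gl_msaa_sharing: average every sample of a
// multisampled texel; this read_imagef overload takes the sample index last.
#if defined(cl_khr_gl_msaa_sharing)
__kernel void resolve_msaa(read_only image2d_msaa_t src,
                           write_only image2d_t dst) {
  int2 pos = (int2)((int)get_global_id(0), (int)get_global_id(1));
  int n = get_image_num_samples(src);
  float4 acc = (float4)(0.0f);
  for (int s = 0; s < n; ++s)
    acc += read_imagef(src, pos, s);
  write_imagef(dst, pos, acc / (float)n);
}
#endif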
-// OpenCL v2.0 s6.13.15 - Work-group Functions
-
-#if defined(__opencl_c_work_group_collective_functions)
-int __ovld __conv work_group_all(int predicate);
-int __ovld __conv work_group_any(int predicate);
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
-#endif //cl_khr_fp16
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
-#endif //cl_khr_fp16
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(__opencl_c_work_group_collective_functions)
-
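// Editorial sketch: the collective functions above must be reached by every
// work-item in the work-group, and all of them receive the same result.
#if defined(__opencl_c_work_group_collective_functions)
__kernel void block_sum(__global const float *in, __global float *out) {
  float total = work_group_reduce_add(in[get_global_id(0)]);
  if (get_local_id(0) == 0)
    out[get_group_id(0)] = total;   // one partial sum per work-group
}
#endif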
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__opencl_c_pipes)
-bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //defined(__opencl_c_pipes)
-
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ndrange_t __ovld ndrange_1D(size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
-
-ndrange_t __ovld ndrange_2D(const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
-
-ndrange_t __ovld ndrange_3D(const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
-
-int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
-
-void __ovld retain_event(clk_event_t);
-
-void __ovld release_event(clk_event_t);
-
-clk_event_t __ovld create_user_event(void);
-
-void __ovld set_user_event_status(clk_event_t e, int state);
-
-bool __ovld is_valid_event(clk_event_t event);
-
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
-
-queue_t __ovld get_default_queue(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
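// Editorial sketch of device-side enqueue: ndrange_1D describes the launch and
// get_default_queue names the target; enqueue_kernel itself is a Clang builtin
// declared elsewhere, shown here only to tie the pieces above together.
__kernel void launch_child(__global int *flag, uint n) {
  if (get_global_id(0) == 0)
    enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL,
                   ndrange_1D(n), ^{ *flag = 1; });
}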
-// OpenCL Extension v2.0 s9.17 - Sub-groups
-
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
-// Shared Sub-group Functions
-uint    __ovld get_sub_group_size(void);
-uint    __ovld get_max_sub_group_size(void);
-uint    __ovld get_num_sub_groups(void);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_enqueued_num_sub_groups(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_sub_group_id(void);
-uint    __ovld get_sub_group_local_id(void);
-
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-int     __ovld __conv sub_group_all(int predicate);
-int     __ovld __conv sub_group_any(int predicate);
-
-int     __ovld __conv sub_group_broadcast(int   x, uint sub_group_local_id);
-uint    __ovld __conv sub_group_broadcast(uint  x, uint sub_group_local_id);
-long    __ovld __conv sub_group_broadcast(long  x, uint sub_group_local_id);
-ulong   __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float   __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int     __ovld __conv sub_group_reduce_add(int   x);
-uint    __ovld __conv sub_group_reduce_add(uint  x);
-long    __ovld __conv sub_group_reduce_add(long  x);
-ulong   __ovld __conv sub_group_reduce_add(ulong x);
-float   __ovld __conv sub_group_reduce_add(float x);
-int     __ovld __conv sub_group_reduce_min(int   x);
-uint    __ovld __conv sub_group_reduce_min(uint  x);
-long    __ovld __conv sub_group_reduce_min(long  x);
-ulong   __ovld __conv sub_group_reduce_min(ulong x);
-float   __ovld __conv sub_group_reduce_min(float x);
-int     __ovld __conv sub_group_reduce_max(int   x);
-uint    __ovld __conv sub_group_reduce_max(uint  x);
-long    __ovld __conv sub_group_reduce_max(long  x);
-ulong   __ovld __conv sub_group_reduce_max(ulong x);
-float   __ovld __conv sub_group_reduce_max(float x);
-
-int     __ovld __conv sub_group_scan_exclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_add(float x);
-int     __ovld __conv sub_group_scan_exclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_min(float x);
-int     __ovld __conv sub_group_scan_exclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int     __ovld __conv sub_group_scan_inclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_add(float x);
-int     __ovld __conv sub_group_scan_inclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_min(float x);
-int     __ovld __conv sub_group_scan_inclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_max(float x);
-
-#ifdef cl_khr_fp16
-half    __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half    __ovld __conv sub_group_reduce_add(half x);
-half    __ovld __conv sub_group_reduce_min(half x);
-half    __ovld __conv sub_group_reduce_max(half x);
-half    __ovld __conv sub_group_scan_exclusive_add(half x);
-half    __ovld __conv sub_group_scan_exclusive_min(half x);
-half    __ovld __conv sub_group_scan_exclusive_max(half x);
-half    __ovld __conv sub_group_scan_inclusive_add(half x);
-half    __ovld __conv sub_group_scan_inclusive_min(half x);
-half    __ovld __conv sub_group_scan_inclusive_max(half x);
-#endif //cl_khr_fp16
-
-#ifdef cl_khr_fp64
-double  __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double  __ovld __conv sub_group_reduce_add(double x);
-double  __ovld __conv sub_group_reduce_min(double x);
-double  __ovld __conv sub_group_reduce_max(double x);
-double  __ovld __conv sub_group_scan_exclusive_add(double x);
-double  __ovld __conv sub_group_scan_exclusive_min(double x);
-double  __ovld __conv sub_group_scan_exclusive_max(double x);
-double  __ovld __conv sub_group_scan_inclusive_add(double x);
-double  __ovld __conv sub_group_scan_inclusive_min(double x);
-double  __ovld __conv sub_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif // cl_intel_subgroups || cl_khr_subgroups || __opencl_c_subgroups
-
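// Editorial sketch: a uniform sub-group reduction; every work-item in the
// sub-group must execute the call and every one receives the full sum.
#if defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
__kernel void sg_sum(__global const float *in, __global float *out) {
  float v = sub_group_reduce_add(in[get_global_id(0)]);
  if (get_sub_group_local_id() == 0)
    out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = v;
}
#endif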
-#if defined(cl_khr_subgroup_extended_types)
-char __ovld __conv sub_group_broadcast( char value, uint index );
-char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
-char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
-char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
-char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
-char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
-
-uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
-uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
-uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
-uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
-uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
-uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
-
-short __ovld __conv sub_group_broadcast( short value, uint index );
-short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
-short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
-short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
-short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
-short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
-
-ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
-ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
-ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
-ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
-ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
-ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
-
-// scalar int broadcast is part of cl_khr_subgroups
-int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
-int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
-int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
-int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
-int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
-
-// scalar uint broadcast is part of cl_khr_subgroups
-uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
-uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
-uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
-uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
-uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
-
-// scalar long broadcast is part of cl_khr_subgroups
-long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
-long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
-long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
-long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
-long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
-
-// scalar ulong broadcast is part of cl_khr_subgroups
-ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
-ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
-ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
-ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
-ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
-
-// scalar float broadcast is part of cl_khr_subgroups
-float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
-float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
-float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
-float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
-float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
-
-char __ovld __conv sub_group_reduce_add( char value );
-uchar __ovld __conv sub_group_reduce_add( uchar value );
-short __ovld __conv sub_group_reduce_add( short value );
-ushort __ovld __conv sub_group_reduce_add( ushort value );
-
-char __ovld __conv sub_group_reduce_min( char value );
-uchar __ovld __conv sub_group_reduce_min( uchar value );
-short __ovld __conv sub_group_reduce_min( short value );
-ushort __ovld __conv sub_group_reduce_min( ushort value );
-
-char __ovld __conv sub_group_reduce_max( char value );
-uchar __ovld __conv sub_group_reduce_max( uchar value );
-short __ovld __conv sub_group_reduce_max( short value );
-ushort __ovld __conv sub_group_reduce_max( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_add( char value );
-uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
-short __ovld __conv sub_group_scan_inclusive_add( short value );
-ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_min( char value );
-uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
-short __ovld __conv sub_group_scan_inclusive_min( short value );
-ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_inclusive_max( char value );
-uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
-short __ovld __conv sub_group_scan_inclusive_max( short value );
-ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_add( char value );
-uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
-short __ovld __conv sub_group_scan_exclusive_add( short value );
-ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_min( char value );
-uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
-short __ovld __conv sub_group_scan_exclusive_min( short value );
-ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
-
-char __ovld __conv sub_group_scan_exclusive_max( char value );
-uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
-short __ovld __conv sub_group_scan_exclusive_max( short value );
-ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
-
-#if defined(cl_khr_fp16)
-// scalar half broadcast is part of cl_khr_subgroups
-half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
-half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
-half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
-half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
-half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
-#endif  // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-// scalar double broadcast is part of cl_khr_subgroups
-double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
-double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
-double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
-double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
-double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
-#endif  // cl_khr_fp64
-
-#endif  // cl_khr_subgroup_extended_types
-
-#if defined(cl_khr_subgroup_non_uniform_vote)
-int     __ovld sub_group_elect(void);
-int     __ovld sub_group_non_uniform_all( int predicate );
-int     __ovld sub_group_non_uniform_any( int predicate );
-
-int     __ovld sub_group_non_uniform_all_equal( char value );
-int     __ovld sub_group_non_uniform_all_equal( uchar value );
-int     __ovld sub_group_non_uniform_all_equal( short value );
-int     __ovld sub_group_non_uniform_all_equal( ushort value );
-int     __ovld sub_group_non_uniform_all_equal( int value );
-int     __ovld sub_group_non_uniform_all_equal( uint value );
-int     __ovld sub_group_non_uniform_all_equal( long value );
-int     __ovld sub_group_non_uniform_all_equal( ulong value );
-int     __ovld sub_group_non_uniform_all_equal( float value );
-
-#if defined(cl_khr_fp16)
-int     __ovld sub_group_non_uniform_all_equal( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-int     __ovld sub_group_non_uniform_all_equal( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_vote
-
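// Editorial sketch: sub_group_elect returns non-zero for exactly one active
// work-item, which is handy for once-per-sub-group work in divergent code.
#if defined(cl_khr_subgroup_non_uniform_vote)
__kernel void count_active_groups(__global const int *in,
                                  volatile __global int *counter) {
  if (in[get_global_id(0)] != 0) {    // lanes diverge here
    if (sub_group_elect())
      atomic_inc(counter);            // one increment per sub-group with active lanes
  }
}
#endif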
-#if defined(cl_khr_subgroup_ballot)
-char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
-char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
-char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
-char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
-char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
-char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
-
-uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
-uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
-uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
-uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
-uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
-uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
-
-short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
-short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
-short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
-short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
-short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
-short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
-
-ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
-ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
-ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
-ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
-ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
-ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
-
-int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
-int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
-int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
-int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
-int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
-int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
-
-uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
-uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
-uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
-uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
-uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
-uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
-
-long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
-long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
-long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
-long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
-long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
-long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
-
-ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
-ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
-ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
-ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
-ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
-ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
-
-float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
-float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
-float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
-float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
-float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
-float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
-
-char    __ovld sub_group_broadcast_first( char value );
-uchar   __ovld sub_group_broadcast_first( uchar value );
-short   __ovld sub_group_broadcast_first( short value );
-ushort  __ovld sub_group_broadcast_first( ushort value );
-int     __ovld sub_group_broadcast_first( int value );
-uint    __ovld sub_group_broadcast_first( uint value );
-long    __ovld sub_group_broadcast_first( long value );
-ulong   __ovld sub_group_broadcast_first( ulong value );
-float   __ovld sub_group_broadcast_first( float value );
-
-uint4   __ovld sub_group_ballot( int predicate );
-int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
-int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
-uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
-
-uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
-uint    __ovld sub_group_ballot_find_lsb( uint4 value );
-uint    __ovld sub_group_ballot_find_msb( uint4 value );
-
-uint4   __ovld __cnfn get_sub_group_eq_mask(void);
-uint4   __ovld __cnfn get_sub_group_ge_mask(void);
-uint4   __ovld __cnfn get_sub_group_gt_mask(void);
-uint4   __ovld __cnfn get_sub_group_le_mask(void);
-uint4   __ovld __cnfn get_sub_group_lt_mask(void);
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
-half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
-half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
-half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
-half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
-half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
-
-half    __ovld sub_group_broadcast_first( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
-double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
-double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
-double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
-double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
-double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
-
-double   __ovld sub_group_broadcast_first( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_ballot
-
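// Editorial sketch: sub_group_ballot packs one predicate bit per lane into a
// uint4 mask; the bit-count query above then counts the lanes that passed.
#if defined(cl_khr_subgroup_ballot)
__kernel void count_positive(__global const float *in, __global uint *out) {
  uint4 mask = sub_group_ballot(in[get_global_id(0)] > 0.0f);
  uint n = sub_group_ballot_bit_count(mask);
  if (get_sub_group_local_id() == 0)
    out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = n;
}
#endif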
-#if defined(cl_khr_subgroup_non_uniform_arithmetic)
-char    __ovld sub_group_non_uniform_reduce_add( char value );
-uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
-short   __ovld sub_group_non_uniform_reduce_add( short value );
-ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
-int     __ovld sub_group_non_uniform_reduce_add( int value );
-uint    __ovld sub_group_non_uniform_reduce_add( uint value );
-long    __ovld sub_group_non_uniform_reduce_add( long value );
-ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
-float   __ovld sub_group_non_uniform_reduce_add( float value );
-
-char    __ovld sub_group_non_uniform_reduce_mul( char value );
-uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
-short   __ovld sub_group_non_uniform_reduce_mul( short value );
-ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
-int     __ovld sub_group_non_uniform_reduce_mul( int value );
-uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
-long    __ovld sub_group_non_uniform_reduce_mul( long value );
-ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
-float   __ovld sub_group_non_uniform_reduce_mul( float value );
-
-char    __ovld sub_group_non_uniform_reduce_min( char value );
-uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
-short   __ovld sub_group_non_uniform_reduce_min( short value );
-ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
-int     __ovld sub_group_non_uniform_reduce_min( int value );
-uint    __ovld sub_group_non_uniform_reduce_min( uint value );
-long    __ovld sub_group_non_uniform_reduce_min( long value );
-ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
-float   __ovld sub_group_non_uniform_reduce_min( float value );
-
-char    __ovld sub_group_non_uniform_reduce_max( char value );
-uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
-short   __ovld sub_group_non_uniform_reduce_max( short value );
-ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
-int     __ovld sub_group_non_uniform_reduce_max( int value );
-uint    __ovld sub_group_non_uniform_reduce_max( uint value );
-long    __ovld sub_group_non_uniform_reduce_max( long value );
-ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
-float   __ovld sub_group_non_uniform_reduce_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
-float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
-
-char    __ovld sub_group_non_uniform_reduce_and( char value );
-uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
-short   __ovld sub_group_non_uniform_reduce_and( short value );
-ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
-int     __ovld sub_group_non_uniform_reduce_and( int value );
-uint    __ovld sub_group_non_uniform_reduce_and( uint value );
-long    __ovld sub_group_non_uniform_reduce_and( long value );
-ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_or( char value );
-uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
-short   __ovld sub_group_non_uniform_reduce_or( short value );
-ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
-int     __ovld sub_group_non_uniform_reduce_or( int value );
-uint    __ovld sub_group_non_uniform_reduce_or( uint value );
-long    __ovld sub_group_non_uniform_reduce_or( long value );
-ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
-
-char    __ovld sub_group_non_uniform_reduce_xor( char value );
-uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
-short   __ovld sub_group_non_uniform_reduce_xor( short value );
-ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
-int     __ovld sub_group_non_uniform_reduce_xor( int value );
-uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
-long    __ovld sub_group_non_uniform_reduce_xor( long value );
-ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
-
-char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
-uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
-short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
-ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
-int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
-uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
-long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
-ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
-
-int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
-
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
-int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_non_uniform_reduce_add( half value );
-half    __ovld sub_group_non_uniform_reduce_mul( half value );
-half    __ovld sub_group_non_uniform_reduce_min( half value );
-half    __ovld sub_group_non_uniform_reduce_max( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
-half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_non_uniform_reduce_add( double value );
-double  __ovld sub_group_non_uniform_reduce_mul( double value );
-double  __ovld sub_group_non_uniform_reduce_min( double value );
-double  __ovld sub_group_non_uniform_reduce_max( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
-double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_non_uniform_arithmetic
-
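// [Editor's illustration -- not part of the original header.] A minimal
// sketch of the non-uniform arithmetic built-ins declared above, assuming a
// device that exposes cl_khr_subgroup_non_uniform_arithmetic; the kernel name
// and buffer layout are hypothetical. The scans and reductions operate over
// the currently active (possibly divergent) work-items of the sub-group.
__kernel void flag_scan(__global const uint *flags, __global uint *out) {
    size_t gid = get_global_id(0);
    // Inclusive OR-scan: each lane sees the OR of all active lanes up to it.
    uint seen = sub_group_non_uniform_scan_inclusive_or(flags[gid]);
    // Logical AND-reduce of a per-lane predicate (non-zero means true).
    int all_set = sub_group_non_uniform_reduce_logical_and(flags[gid] != 0u);
    out[gid] = seen | (uint)all_set;
}
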
-#if defined(cl_khr_subgroup_shuffle)
-char    __ovld sub_group_shuffle( char value, uint index );
-uchar   __ovld sub_group_shuffle( uchar value, uint index );
-short   __ovld sub_group_shuffle( short value, uint index );
-ushort  __ovld sub_group_shuffle( ushort value, uint index );
-int     __ovld sub_group_shuffle( int value, uint index );
-uint    __ovld sub_group_shuffle( uint value, uint index );
-long    __ovld sub_group_shuffle( long value, uint index );
-ulong   __ovld sub_group_shuffle( ulong value, uint index );
-float   __ovld sub_group_shuffle( float value, uint index );
-
-char    __ovld sub_group_shuffle_xor( char value, uint mask );
-uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
-short   __ovld sub_group_shuffle_xor( short value, uint mask );
-ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
-int     __ovld sub_group_shuffle_xor( int value, uint mask );
-uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
-long    __ovld sub_group_shuffle_xor( long value, uint mask );
-ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
-float   __ovld sub_group_shuffle_xor( float value, uint mask );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle( half value, uint index );
-half    __ovld sub_group_shuffle_xor( half value, uint mask );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle( double value, uint index );
-double  __ovld sub_group_shuffle_xor( double value, uint mask );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle
-
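// [Editor's illustration -- not part of the original header.] A butterfly
// sum built on the sub_group_shuffle_xor overloads above; assumes
// cl_khr_subgroup_shuffle and a power-of-two sub-group size. The helper name
// is ours.
float sub_group_butterfly_sum(float v) {
    for (uint mask = get_max_sub_group_size() / 2u; mask > 0u; mask >>= 1u)
        v += sub_group_shuffle_xor(v, mask);  // exchange with lane id ^ mask
    return v;  // every participating lane now holds the full sum
}
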
-#if defined(cl_khr_subgroup_shuffle_relative)
-char    __ovld sub_group_shuffle_up( char value, uint delta );
-uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
-short   __ovld sub_group_shuffle_up( short value, uint delta );
-ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
-int     __ovld sub_group_shuffle_up( int value, uint delta );
-uint    __ovld sub_group_shuffle_up( uint value, uint delta );
-long    __ovld sub_group_shuffle_up( long value, uint delta );
-ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
-float   __ovld sub_group_shuffle_up( float value, uint delta );
-
-char    __ovld sub_group_shuffle_down( char value, uint delta );
-uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
-short   __ovld sub_group_shuffle_down( short value, uint delta );
-ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
-int     __ovld sub_group_shuffle_down( int value, uint delta );
-uint    __ovld sub_group_shuffle_down( uint value, uint delta );
-long    __ovld sub_group_shuffle_down( long value, uint delta );
-ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
-float   __ovld sub_group_shuffle_down( float value, uint delta );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_shuffle_up( half value, uint delta );
-half    __ovld sub_group_shuffle_down( half value, uint delta );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_shuffle_up( double value, uint delta );
-double  __ovld sub_group_shuffle_down( double value, uint delta );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_shuffle_relative
-
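// [Editor's illustration -- not part of the original header.] Neighbour
// access with the relative shuffles above (cl_khr_subgroup_shuffle_relative).
// Lanes shifted in from outside the sub-group yield undefined values, so the
// caller must mask the edge lanes.
float lane_delta(float v) {
    float left  = sub_group_shuffle_up(v, 1u);    // value from lane id - 1
    float right = sub_group_shuffle_down(v, 1u);  // value from lane id + 1
    return right - left;
}
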
-#if defined(cl_khr_subgroup_clustered_reduce)
-char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
-float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
-
-char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
-uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
-short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
-ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
-int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
-uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
-long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
-ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
-
-int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
-int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
-
-#if defined(cl_khr_fp16)
-half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
-half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
-#endif // cl_khr_fp16
-
-#if defined(cl_khr_fp64)
-double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
-double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
-#endif // cl_khr_fp64
-
-#endif // cl_khr_subgroup_clustered_reduce
-
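// [Editor's illustration -- not part of the original header.] A 4-wide
// clustered reduction using the declarations above
// (cl_khr_subgroup_clustered_reduce). The cluster size must be a power of
// two no larger than the sub-group size.
uint quad_max(uint v) {
    return sub_group_clustered_reduce_max(v, 4u);  // max over each group of 4 lanes
}
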
-#if defined(cl_khr_extended_bit_ops)
-char __ovld __cnfn bitfield_insert(char, char, uint, uint);
-uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
-short __ovld __cnfn bitfield_insert(short, short, uint, uint);
-ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
-int __ovld __cnfn bitfield_insert(int, int, uint, uint);
-uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
-long __ovld __cnfn bitfield_insert(long, long, uint, uint);
-ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
-char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
-uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
-ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
-uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
-long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
-ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
-uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
-ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
-uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
-long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
-ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
-uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
-ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
-uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
-long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
-ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
-uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
-ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
-uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
-long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
-ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
-uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
-ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
-uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
-long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
-ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
-
-char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
-short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
-int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
-long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
-char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
-short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
-int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
-long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
-char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
-short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
-int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
-long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
-char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
-short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
-int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
-long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
-char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
-short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
-int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
-long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
-char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
-short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
-int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
-long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
-
-uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
-ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
-uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
-ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
-uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
-ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
-uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
-ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
-uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
-ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
-uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
-ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
-uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
-ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
-uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
-ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
-uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
-ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
-uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
-ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
-uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
-ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
-uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
-ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
-
-char __ovld __cnfn bit_reverse(char);
-uchar __ovld __cnfn bit_reverse(uchar);
-short __ovld __cnfn bit_reverse(short);
-ushort __ovld __cnfn bit_reverse(ushort);
-int __ovld __cnfn bit_reverse(int);
-uint __ovld __cnfn bit_reverse(uint);
-long __ovld __cnfn bit_reverse(long);
-ulong __ovld __cnfn bit_reverse(ulong);
-char2 __ovld __cnfn bit_reverse(char2);
-uchar2 __ovld __cnfn bit_reverse(uchar2);
-short2 __ovld __cnfn bit_reverse(short2);
-ushort2 __ovld __cnfn bit_reverse(ushort2);
-int2 __ovld __cnfn bit_reverse(int2);
-uint2 __ovld __cnfn bit_reverse(uint2);
-long2 __ovld __cnfn bit_reverse(long2);
-ulong2 __ovld __cnfn bit_reverse(ulong2);
-char3 __ovld __cnfn bit_reverse(char3);
-uchar3 __ovld __cnfn bit_reverse(uchar3);
-short3 __ovld __cnfn bit_reverse(short3);
-ushort3 __ovld __cnfn bit_reverse(ushort3);
-int3 __ovld __cnfn bit_reverse(int3);
-uint3 __ovld __cnfn bit_reverse(uint3);
-long3 __ovld __cnfn bit_reverse(long3);
-ulong3 __ovld __cnfn bit_reverse(ulong3);
-char4 __ovld __cnfn bit_reverse(char4);
-uchar4 __ovld __cnfn bit_reverse(uchar4);
-short4 __ovld __cnfn bit_reverse(short4);
-ushort4 __ovld __cnfn bit_reverse(ushort4);
-int4 __ovld __cnfn bit_reverse(int4);
-uint4 __ovld __cnfn bit_reverse(uint4);
-long4 __ovld __cnfn bit_reverse(long4);
-ulong4 __ovld __cnfn bit_reverse(ulong4);
-char8 __ovld __cnfn bit_reverse(char8);
-uchar8 __ovld __cnfn bit_reverse(uchar8);
-short8 __ovld __cnfn bit_reverse(short8);
-ushort8 __ovld __cnfn bit_reverse(ushort8);
-int8 __ovld __cnfn bit_reverse(int8);
-uint8 __ovld __cnfn bit_reverse(uint8);
-long8 __ovld __cnfn bit_reverse(long8);
-ulong8 __ovld __cnfn bit_reverse(ulong8);
-char16 __ovld __cnfn bit_reverse(char16);
-uchar16 __ovld __cnfn bit_reverse(uchar16);
-short16 __ovld __cnfn bit_reverse(short16);
-ushort16 __ovld __cnfn bit_reverse(ushort16);
-int16 __ovld __cnfn bit_reverse(int16);
-uint16 __ovld __cnfn bit_reverse(uint16);
-long16 __ovld __cnfn bit_reverse(long16);
-ulong16 __ovld __cnfn bit_reverse(ulong16);
-#endif // cl_khr_extended_bit_ops
-
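// [Editor's illustration -- not part of the original header.] The
// cl_khr_extended_bit_ops built-ins above take a bit 'offset' and 'count'
// selecting a contiguous field; this hypothetical helper moves a 4-bit field
// and mirrors the result.
uint repack_nibble(uint word) {
    uint field = bitfield_extract_unsigned(word, 8u, 4u);  // bits [11:8]
    uint moved = bitfield_insert(0u, field, 0u, 4u);       // placed at bits [3:0]
    return bit_reverse(moved);                             // bit 0 <-> bit 31, etc.
}
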
-#if defined(__opencl_c_integer_dot_product_input_4x8bit)
-uint __ovld __cnfn dot(uchar4, uchar4);
-int __ovld __cnfn dot(char4, char4);
-int __ovld __cnfn dot(uchar4, char4);
-int __ovld __cnfn dot(char4, uchar4);
-
-uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
-int __ovld __cnfn dot_acc_sat(char4, char4, int);
-int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
-int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit
-
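// [Editor's illustration -- not part of the original header.] The 4x8-bit
// dot products above multiply corresponding components and sum them; the
// *_acc_sat forms also add an accumulator with saturation. Mixed signedness
// selects the mixed overloads declared above.
int mac_s8u8(char4 a, uchar4 b, int acc) {
    return dot_acc_sat(a, b, acc);
}
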
-#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
-uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
-int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
-int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
-
-uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
-int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
-int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
-#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
-
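// [Editor's illustration -- not part of the original header.] The packed
// variants above take four 8-bit lanes packed into each uint argument; the
// _uu/_ss/_us/_su suffix names the signedness of the two operands.
uint packed_udot(uint a, uint b) {
    return dot_4x8packed_uu_uint(a, b);
}
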
-#if defined(cl_intel_subgroups)
-// Intel-Specific Sub Group Functions
-float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_xor( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_xor( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_xor( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
-
-uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
-uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
-uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
-uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
-
-void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
-void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
-void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
-void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
-
-#ifdef cl_khr_fp16
-half    __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half    __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
-#endif
-
-#if defined(cl_khr_fp64)
-double  __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double  __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
-#endif
-
-#endif // cl_intel_subgroups
-
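// [Editor's illustration -- not part of the original header.] Cooperative
// block I/O plus shuffle from cl_intel_subgroups above. The pointer must be
// sub-group-uniform and suitably aligned; per-sub-group offsetting is
// omitted for brevity, and the kernel name is hypothetical.
__kernel void broadcast_block(const __global uint *src, __global uint *dst) {
    uint v = intel_sub_group_block_read(src);  // one uint per lane, one block per sub-group
    v = intel_sub_group_shuffle(v, 0u);        // broadcast lane 0's element
    intel_sub_group_block_write(dst, v);       // cooperative store
}
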
-#if defined(cl_intel_subgroups_short)
-short       __ovld __conv intel_sub_group_broadcast( short  x, uint sub_group_local_id );
-short2      __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3      __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4      __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8      __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort      __ovld __conv intel_sub_group_broadcast( ushort  x, uint sub_group_local_id );
-ushort2     __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3     __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4     __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8     __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short       __ovld __conv intel_sub_group_shuffle( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_up( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_up( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_up( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_up( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_up( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_xor( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_xor( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_xor( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_xor( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_xor( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_reduce_add( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_min( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_max( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
-
-uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
-
-void       __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
-
-ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
-
-void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
-void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
-void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
-void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
-#endif // cl_intel_subgroups_short
-
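// [Editor's illustration -- not part of the original header.] 16-bit block
// reads combined with the short reductions from cl_intel_subgroups_short
// above; the kernel name and output indexing are hypothetical.
__kernel void sum_ushort(const __global ushort *src, __global ushort *dst) {
    ushort v = intel_sub_group_reduce_add(intel_sub_group_block_read_us(src));
    if (get_sub_group_local_id() == 0u)
        dst[get_sub_group_id()] = v;  // one sum per sub-group
}
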
-#ifdef cl_intel_device_side_avc_motion_estimation
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
-
-// MCE built-in functions
-uchar __ovld
-intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
-    uchar slice_type, uchar qp);
-ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
-    uchar slice_type, uchar qp);
-uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
-    uchar slice_type, uchar qp);
-uint2 __ovld
-intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
-    uchar slice_type, uchar qp);
-
-uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
-uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
-uchar __ovld
-intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
-
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_shape_penalty(
-    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_ac_only_haar(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_mce_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
-    intel_sub_group_avc_mce_result_t result);
-uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld
-intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_mce_result_t result);
-
-// IME built-in functions
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_initialize(
-    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference(
-    short2 ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference(
-    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_max_motion_vector_count(
-    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_unidirectional_mix_disable(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_early_search_termination_threshold(
-    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_weighted_sad(
-    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
-
-__attribute__((deprecated("If you use the latest Intel driver, please use "
-                          "intel_sub_group_avc_ime_ref_window_size instead",
-                          "intel_sub_group_avc_ime_ref_window_size")))
-ushort2 __ovld
-intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
-ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
-    uchar search_window_config, char dual_ref);
-short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
-    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
-    ushort2 image_size);
-
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-
-intel_sub_group_avc_ime_single_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_single_reference_streamin(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_dual_reference_streamin(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_single_reference_streamout(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_dual_reference_streamout(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-
-uchar __ovld intel_sub_group_avc_ime_get_border_reached(
-    uchar image_select, intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
-    intel_sub_group_avc_ime_result_t result);
-
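// [Editor's illustration -- not part of the original header.] A hypothetical
// fragment tying the IME built-ins above together: build a payload, select a
// single reference window, run the search, then query the result. The
// CLK_AVC_ME_* constants belong to the same Intel extension but are defined
// elsewhere; we assume the usual names from opencl-c-base.h.
void ime_single_ref_search(read_only image2d_t src, read_only image2d_t ref,
                           sampler_t vme_sampler, ushort2 src_coord,
                           short2 ref_offset, __global uchar *truncated) {
    intel_sub_group_avc_ime_payload_t p = intel_sub_group_avc_ime_initialize(
        src_coord, CLK_AVC_ME_PARTITION_MASK_ALL_INTEL,
        CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);
    p = intel_sub_group_avc_ime_set_single_reference(
        ref_offset, CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL, p);
    intel_sub_group_avc_ime_result_t r =
        intel_sub_group_avc_ime_evaluate_with_single_reference(src, ref,
                                                               vme_sampler, p);
    *truncated = intel_sub_group_avc_ime_get_truncated_search_indication(r);
}
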
-// REF built-in functions
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_fme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar sad_adjustment);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_bme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar bidirectional_weight, uchar sad_adjustment);
-
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bidirectional_mix_disable(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bilinear_filter_enable(
-    intel_sub_group_avc_ref_payload_t payload);
-
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-
-// SIC built-in functions
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_initialize(
-    ushort2 src_coord);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_skc(
-    uint skip_block_partition_type, uint skip_motion_vector_mask,
-    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
-    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-uint __ovld
-intel_sub_group_avc_sic_get_motion_vector_mask(
-    uint skip_block_partition_type, uchar direction);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
-    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
-    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
-    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
-    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_forward_transform_enable(
-    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
-    uchar block_based_skip_type,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_ipe(
-    read_only image2d_t src_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-
-uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
-    intel_sub_group_avc_sic_result_t result);
-uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
-    intel_sub_group_avc_sic_result_t result);
-uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
-    intel_sub_group_avc_sic_result_t result);
-
-// Wrappers
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_ac_only_haar(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_ac_only_haar(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_ac_only_haar(
-    intel_sub_group_avc_sic_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
-    intel_sub_group_avc_ime_result_t result);
-ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
-    intel_sub_group_avc_ref_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
-    intel_sub_group_avc_ref_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
-    intel_sub_group_avc_sic_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
-    intel_sub_group_avc_ref_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
-    intel_sub_group_avc_ref_result_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld
-intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ref_result_t result);
-
-// Type conversion functions
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_payload(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_payload(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_payload(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_result(
-    intel_sub_group_avc_ime_result_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_result(
-    intel_sub_group_avc_ref_result_t result);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_result(
-    intel_sub_group_avc_sic_result_t result);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_result(
-    intel_sub_group_avc_mce_result_t result);
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
-#endif // cl_intel_device_side_avc_motion_estimation
-
-#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_pack(float4 v);
-
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
-#endif // cl_amd_media_ops
-
-#ifdef cl_amd_media_ops2
-int __ovld amd_bfe(int src0, uint src1, uint src2);
-int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
-int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
-int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
-int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
-int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfe(uint src0, uint src1, uint src2);
-uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfm(uint src0, uint src1);
-uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
-uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
-uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
-uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
-uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
-
-float __ovld amd_max3(float src0, float src1, float src2);
-float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_max3(int src0, int src1, int src2);
-int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_max3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_median3(float src0, float src1, float src2);
-float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_median3(int src0, int src1, int src2);
-int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_median3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_min3(float src0, float src1, float src2);
-float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_min3(int src0, int src1, int src2);
-int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_min3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
-
-ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-uint __ovld amd_msad(uint src0, uint src1, uint src2);
-uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadd(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadw(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
-#endif // cl_amd_media_ops2
-
-#if defined(cl_arm_integer_dot_product_int8)
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
-#endif // defined(cl_arm_integer_dot_product_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int8)
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int16)
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
-
-#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
-#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#undef __cnfn
-#undef __ovld
-#endif //_OPENCL_H_
diff --git a/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
deleted file mode 100644
index 008b8dd..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ /dev/null
@@ -1,896 +0,0 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-/*
- * This is the main file that defines all the data structures, signatures, and
- * constant literals that are shared across the profiling runtime library,
- * compiler (instrumentation), and host tools (reader/writer). The entities
- * defined in this file affect the profile runtime ABI, the raw profile format,
- * or both.
- *
- * The file has two identical copies. The primary copy lives in LLVM and
- * the other one sits in the compiler-rt/lib/profile directory. To make changes
- * in this file, first modify the primary copy and then copy it over to
- * compiler-rt. Testing of any change in this file can start only after the
- * two copies are synced up.
- *
- * The first part of the file includes macros that define the types, names, and
- * initializers for the member fields of the core data structures. The field
- * declarations for one structure are enabled by defining the field activation
- * macro associated with that structure. Only one field activation macro
- * can be defined at a time; the remaining definitions are filtered out by
- * the preprocessor.
- *
- * Examples of how the template is used to instantiate structure definition:
- * 1. To declare a structure:
- *
- * struct ProfData {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *    Type Name;
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 2. To construct LLVM type arrays for the struct type:
- *
- * Type *DataTypes[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   LLVMType,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 3. To construct a constant array for the initializers:
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   Initializer,
- * Constant *ConstantVals[] = {
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
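- *
- * 4. (An illustrative sketch added here, not in the original header.) To
- *    build a table of field-name strings, e.g. for a debugging dumper:
- *
- * const char *FieldNames[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   #Name,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };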
- *
- *
- * The second part of the file includes definitions of all other entities that
- * are related to the runtime ABI and format. When no field activation macro is
- * defined, this file can be included to introduce those definitions.
- *
-\*===----------------------------------------------------------------------===*/
-
-/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
- * the compiler runtime. */
-#ifndef INSTR_PROF_VISIBILITY
-#define INSTR_PROF_VISIBILITY
-#endif
-
-/* INSTR_PROF_DATA start. */
-/* Definition of member fields of the per-function control structure. */
-#ifndef INSTR_PROF_DATA
-#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                Inc->getHash()->getZExtValue()))
-INSTR_PROF_DATA(const IntPtrT, IntPtrTy, CounterPtr, RelativeCounterPtr)
-/* This is used to map function pointers for the indirect call targets to
- * function name hashes during the conversion from raw to merged profile
- * data.
- */
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
-                FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
-                ValuesPtrExpr)
-INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
-                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
-INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
-                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
-#undef INSTR_PROF_DATA
-/* INSTR_PROF_DATA end. */
-
-
-/* This is an internal data structure used by the value profiler. It
- * is defined here to allow the serialization code to be shared with
- * LLVM and exercised in unit tests.
- *
- * typedef struct ValueProfNode {
- *   // InstrProfValueData VData;
- *   uint64_t Value;
- *   uint64_t Count;
- *   struct ValueProfNode *Next;
- * } ValueProfNode;
- */
-/* INSTR_PROF_VALUE_NODE start. */
-#ifndef INSTR_PROF_VALUE_NODE
-#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
-                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
-#undef INSTR_PROF_VALUE_NODE
-/* INSTR_PROF_VALUE_NODE end. */
-
-/* INSTR_PROF_RAW_HEADER  start */
-/* Definition of member fields of the raw profile header data structure. */
-#ifndef INSTR_PROF_RAW_HEADER
-#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
-INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
-INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta,
-                      (uintptr_t)CountersBegin - (uintptr_t)DataBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
-#undef INSTR_PROF_RAW_HEADER
-/* INSTR_PROF_RAW_HEADER  end */
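-
-/* Illustrative sketch (an added annotation, not from the original header): a
- * runtime can materialize the raw header struct by activating the field macro
- * before including this file, in the style compiler-rt uses:
- *
- *   typedef struct {
- *   #define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) Type Name;
- *   #include "profile/InstrProfData.inc"
- *   } __llvm_profile_header;
- */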
-
-/* VALUE_PROF_FUNC_PARAM start */
-/* Definition of parameter types of the runtime API used to do value profiling
- * for a given value site.
- */
-#ifndef VALUE_PROF_FUNC_PARAM
-#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
-#define INSTR_PROF_COMMA
-#else
-#define INSTR_PROF_DATA_DEFINED
-#define INSTR_PROF_COMMA ,
-#endif
-VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
-#undef VALUE_PROF_FUNC_PARAM
-#undef INSTR_PROF_COMMA
-/* VALUE_PROF_FUNC_PARAM end */
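-
-/* For reference (a sketch added here, not from the original header):
- * instantiated with the parameters above, the value-profiling hook named by
- * INSTR_PROF_VALUE_PROF_FUNC (defined further below) has a signature
- * equivalent to:
- *
- *   void __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
- *                                         uint32_t CounterIndex);
- */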
-
-/* VALUE_PROF_KIND start */
-#ifndef VALUE_PROF_KIND
-#define VALUE_PROF_KIND(Enumerator, Value, Descr)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-/* For indirect function call value profiling, the addresses of the target
- * functions are profiled by the instrumented code. The target addresses are
- * written into the raw profile data and converted to the target function
- * name's MD5 hash by the profile reader during deserialization. Typically,
- * this happens when the raw profile data is read during profile merging.
- *
- * For this remapping, the ProfData is used. ProfData contains both the
- * function name hash and the function address.
- */
-VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
-/* For size profiling of memory intrinsic functions. */
-VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
-/* These two kinds must be the last to be
- * declared. This is to make sure the string
- * array created with the template can be
- * indexed with the kind value.
- */
-VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
-VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
-
-#undef VALUE_PROF_KIND
-/* VALUE_PROF_KIND end */
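-
-/* Illustrative sketch (not from the original header): host tools typically
- * materialize the kinds as an enum by activating the macro, e.g.
- *
- *   enum InstrProfValueKind {
- *   #define VALUE_PROF_KIND(Enumerator, Value, Descr) Enumerator = Value,
- *   #include "llvm/ProfileData/InstrProfData.inc"
- *   };
- */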
-
-#undef COVMAP_V2_OR_V3
-#ifdef COVMAP_V2
-#define COVMAP_V2_OR_V3
-#endif
-#ifdef COVMAP_V3
-#define COVMAP_V2_OR_V3
-#endif
-
-/* COVMAP_FUNC_RECORD start */
-/* Definition of member fields of the function record structure in coverage
- * map.
- */
-#ifndef COVMAP_FUNC_RECORD
-#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
-                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
-                   llvm::Type::getInt8PtrTy(Ctx)))
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
-                   NameValue.size()))
-#endif
-#ifdef COVMAP_V2_OR_V3
-COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), NameHash))
-#endif
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FuncHash))
-#ifdef COVMAP_V3
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
-COVMAP_FUNC_RECORD(const char, \
-                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
-                                        CoverageMapping.size()), \
-                   CoverageMapping, \
-                   llvm::ConstantDataArray::getRaw( \
-                     CoverageMapping, CoverageMapping.size(), \
-                     llvm::Type::getInt8Ty(Ctx)))
-#endif
-#undef COVMAP_FUNC_RECORD
-/* COVMAP_FUNC_RECORD end.  */
-
-/* COVMAP_HEADER start */
-/* Definition of member fields of the coverage map header.
- */
-#ifndef COVMAP_HEADER
-#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty, NRecords))
-COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
-              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
-COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
-              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
-COVMAP_HEADER(uint32_t, Int32Ty, Version, \
-              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
-#undef COVMAP_HEADER
-/* COVMAP_HEADER end.  */
-
-
-#ifdef INSTR_PROF_SECT_ENTRY
-#define INSTR_PROF_DATA_DEFINED
-INSTR_PROF_SECT_ENTRY(IPSK_data, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
-                      INSTR_PROF_DATA_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
-                      INSTR_PROF_CNTS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_name, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
-                      INSTR_PROF_NAME_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vals, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
-                      INSTR_PROF_VALS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
-                      INSTR_PROF_VNODES_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
-                      INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
-                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
-
-#undef INSTR_PROF_SECT_ENTRY
-#endif
-
-
-#ifdef INSTR_PROF_VALUE_PROF_DATA
-#define INSTR_PROF_DATA_DEFINED
-
-#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
-/*!
- * This is the header of the data structure that defines the on-disk
- * layout of the value profile data of a particular kind for one function.
- */
-typedef struct ValueProfRecord {
-  /* The kind of the value profile record. */
-  uint32_t Kind;
-  /*
-   * The number of value profile sites. It is guaranteed to be non-zero;
-   * otherwise the record for this kind won't be emitted.
-   */
-  uint32_t NumValueSites;
-  /*
-   * The first element of the array that stores the number of profiled
-   * values for each value site. The size of the array is NumValueSites.
-   * Since NumValueSites is greater than zero, there is at least one
-   * element in the array.
-   */
-  uint8_t SiteCountArray[1];
-
-  /*
-   * This fake declaration is for documentation purposes only. It
-   * aligns the start of the next field to an 8-byte boundary.
-  uint8_t Padding[X];
-   */
-
-  /* The array of value profile data. The size of the array is the sum
-   * of all elements in SiteCountArray[].
-  InstrProfValueData ValueData[];
-   */
-
-#ifdef __cplusplus
-  /*!
-   * Return the number of value sites.
-   */
-  uint32_t getNumValueSites() const { return NumValueSites; }
-  /*!
-   * Read data from this record and save it to Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  /*
-   * In-place byte swap:
-   * Do byte swap for this instance. \c Old is the original order before
-   * the swap, and \c New is the New byte order.
-   */
-  void swapBytes(support::endianness Old, support::endianness New);
-#endif
-} ValueProfRecord;
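-
-/* Worked example (added for illustration, not in the original header): for a
- * record with Kind == IPVK_IndirectCallTarget, NumValueSites == 2, and
- * SiteCountArray == {3, 1}, the header occupies
- * offsetof(ValueProfRecord, SiteCountArray) + 2 == 10 bytes, rounded up to 16
- * for 8-byte alignment, and is followed by 3 + 1 == 4 InstrProfValueData
- * entries of 16 bytes each, giving a record size of 16 + 64 == 80 bytes.
- */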
-
-/*!
- * Per-function header/control data structure for value profiling
- * data in indexed format.
- */
-typedef struct ValueProfData {
-  /*
-   * Total size in bytes including this field. It must be a multiple
-   * of sizeof(uint64_t).
-   */
-  uint32_t TotalSize;
-  /*
-   * The number of value profile kinds that have value profile data.
-   * In this implementation, a value profile kind is considered to
-   * have profile data if the number of value profile sites for the
-   * kind is not zero. More aggressively, the implementation can
-   * choose to check the actual data values: if none of the value sites
-   * has any profiled values, the kind can be skipped.
-   */
-  uint32_t NumValueKinds;
-
-  /*
-   * What follows is a sequence of variable-length records. The prefix/header
-   * of each record is defined by the ValueProfRecord type. The number of
-   * records is NumValueKinds.
-   * ValueProfRecord Record_1;
-   * ...
-   * ValueProfRecord Record_N;
-   */
-
-#if __cplusplus
-  /*!
-   * Return the total size in bytes of the on-disk value profile data
-   * given the data stored in Record.
-   */
-  static uint32_t getSize(const InstrProfRecord &Record);
-  /*!
-   * Return a pointer to \c ValueProfData instance ready to be streamed.
-   */
-  static std::unique_ptr<ValueProfData>
-  serializeFrom(const InstrProfRecord &Record);
-  /*!
-   * Check the integrity of the record.
-   */
-  Error checkIntegrity();
-  /*!
-   * Return a pointer to a \c ValueProfData instance ready to be read.
-   * All data in the instance are properly byte-swapped. The input
-   * data is assumed to be in little-endian order.
-   */
-  static Expected<std::unique_ptr<ValueProfData>>
-  getValueProfData(const unsigned char *SrcBuffer,
-                   const unsigned char *const SrcBufferEnd,
-                   support::endianness SrcDataEndianness);
-  /*!
-   * Swap byte order from \c Endianness order to host byte order.
-   */
-  void swapBytesToHost(support::endianness Endianness);
-  /*!
-   * Swap byte order from host byte order to \c Endianness order.
-   */
-  void swapBytesFromHost(support::endianness Endianness);
-  /*!
-   * Return the total size of this \c ValueProfData.
-   */
-  uint32_t getSize() const { return TotalSize; }
-  /*!
-   * Read data from this data and save it to \c Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  void operator delete(void *ptr) { ::operator delete(ptr); }
-#endif
-} ValueProfData;
-
-/*
- * The closure is designed to abstract away two types of value profile data:
- * - InstrProfRecord, which is the primary data structure used to
- *   represent profile data in host tools (reader, writer, and profile-use)
- * - the value profile runtime data structure suitable to be used by the C
- *   runtime library.
- *
- * Both sources of data need to serialize to a disk/memory buffer in a common
- * format: ValueProfData. The abstraction allows compiler-rt's raw profile
- * writer to share the same format and code with the indexed profile writer.
- *
- * For documentation of the member methods below, refer to the corresponding
- * methods in class InstrProfRecord.
- */
-typedef struct ValueProfRecordClosure {
-  const void *Record;
-  uint32_t (*GetNumValueKinds)(const void *Record);
-  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
-
-  /*
-   * After extracting the value profile data from the value profile record,
-   * this method is used to map the in-memory value to on-disk value. If
-   * the method is null, value will be written out untranslated.
-   */
-  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
-  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
-                          uint32_t S);
-  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
-} ValueProfRecordClosure;
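-
-/* Minimal usage sketch (hypothetical, added for illustration; the My*
- * callbacks are placeholders): a writer wires its record type into the
- * closure, then serializes it with the API below:
- *
- *   ValueProfRecordClosure C = {
- *       MyRecord,                 // opaque record object
- *       MyGetNumValueKinds,       // callbacks matching the fields above
- *       MyGetNumValueSites,
- *       MyGetNumValueData,
- *       MyGetNumValueDataForSite,
- *       NULL,                     // RemapValueData: write values untranslated
- *       MyGetValueForSite,
- *       MyAllocValueProfData};
- *   ValueProfData *VPD = serializeValueProfDataFrom(&C, NULL);
- */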
-
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getFirstValueProfRecord(ValueProfData *VPD);
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getValueProfRecordNext(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY InstrProfValueData *
-getValueProfRecordValueData(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfRecordHeaderSize(uint32_t NumValueSites);
-
-#undef INSTR_PROF_VALUE_PROF_DATA
-#endif  /* INSTR_PROF_VALUE_PROF_DATA */
-
-
-#ifdef INSTR_PROF_COMMON_API_IMPL
-#define INSTR_PROF_DATA_DEFINED
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#define INSTR_PROF_NULLPTR nullptr
-#else
-#define INSTR_PROF_INLINE
-#define INSTR_PROF_NULLPTR NULL
-#endif
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-/*!
- * Return the \c ValueProfRecord header size including the
- * padding bytes.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
-  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
-                  sizeof(uint8_t) * NumValueSites;
-  /* Round the size to multiple of 8 bytes. */
-  Size = (Size + 7) & ~7;
-  return Size;
-}
-
-/*!
- * Return the total size of the value profile record including the
- * header and the value data.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordSize(uint32_t NumValueSites,
-                                uint32_t NumValueData) {
-  return getValueProfRecordHeaderSize(NumValueSites) +
-         sizeof(InstrProfValueData) * NumValueData;
-}
-
-/*!
- * Return the pointer to the start of value data array.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
-  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
-                                                   This->NumValueSites));
-}
-
-/*!
- * Return the total number of value data for \c This record.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
-  uint32_t NumValueData = 0;
-  uint32_t I;
-  for (I = 0; I < This->NumValueSites; I++)
-    NumValueData += This->SiteCountArray[I];
-  return NumValueData;
-}
-
-/*!
- * Use this method to advance from \c This to the next \c ValueProfRecord.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
-  uint32_t NumValueData = getValueProfRecordNumValueData(This);
-  return (ValueProfRecord *)((char *)This +
-                             getValueProfRecordSize(This->NumValueSites,
-                                                    NumValueData));
-}
-
-/*!
- * Return the first \c ValueProfRecord instance.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
-  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
-}
-
-/* Closure based interfaces.  */
-
-/*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfDataSize(ValueProfRecordClosure *Closure) {
-  uint32_t Kind;
-  uint32_t TotalSize = sizeof(ValueProfData);
-  const void *Record = Closure->Record;
-
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
-    if (!NumValueSites)
-      continue;
-    TotalSize += getValueProfRecordSize(NumValueSites,
-                                        Closure->GetNumValueData(Record, Kind));
-  }
-  return TotalSize;
-}
-
-/*!
- * Extract value profile data of a function for the profile kind \c ValueKind
- * from the \c Closure and serialize the data into \c This record instance.
- */
-INSTR_PROF_VISIBILITY void
-serializeValueProfRecordFrom(ValueProfRecord *This,
-                             ValueProfRecordClosure *Closure,
-                             uint32_t ValueKind, uint32_t NumValueSites) {
-  uint32_t S;
-  const void *Record = Closure->Record;
-  This->Kind = ValueKind;
-  This->NumValueSites = NumValueSites;
-  InstrProfValueData *DstVD = getValueProfRecordValueData(This);
-
-  for (S = 0; S < NumValueSites; S++) {
-    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
-    This->SiteCountArray[S] = ND;
-    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
-    DstVD += ND;
-  }
-}
-
-/*!
- * Extract the value profile data of a function from the \c Closure
- * and serialize it into \c DstData if \c DstData is not NULL, or otherwise
- * into heap memory allocated by the \c Closure's allocator method. If \c
- * DstData is not null, the caller is expected to have already set the
- * TotalSize in DstData.
- */
-INSTR_PROF_VISIBILITY ValueProfData *
-serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
-                           ValueProfData *DstData) {
-  uint32_t Kind;
-  uint32_t TotalSize =
-      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
-
-  ValueProfData *VPD =
-      DstData ? DstData : Closure->AllocValueProfData(TotalSize);
-
-  VPD->TotalSize = TotalSize;
-  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
-  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
-    if (!NumValueSites)
-      continue;
-    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
-    VR = getValueProfRecordNext(VR);
-  }
-  return VPD;
-}
-
-#undef INSTR_PROF_COMMON_API_IMPL
-#endif /* INSTR_PROF_COMMON_API_IMPL */
-
-/*============================================================================*/
-
-#ifndef INSTR_PROF_DATA_DEFINED
-
-#ifndef INSTR_PROF_DATA_INC
-#define INSTR_PROF_DATA_INC
-
-/* Helper macros.  */
-#define INSTR_PROF_SIMPLE_QUOTE(x) #x
-#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
-#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
-#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
-
-/* Magic number to detect file format and endianness.
- * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
- * so that utilities, like strings, don't grab it as a string.  129 is also
- * invalid UTF-8, and high enough to be interesting.
- * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
- * for 32-bit platforms.
- */
-#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
-#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
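-/* Spelled out (an added cross-check, not in the original header):
- * INSTR_PROF_RAW_MAGIC_64 == 0xff6c70726f667281, i.e. the bytes
- * 0xff 'l' 'p' 'r' 'o' 'f' 'r' 0x81 from most- to least-significant.
- */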
-
-/* Raw profile format version (start from 1). */
-#define INSTR_PROF_RAW_VERSION 8
-/* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 7
-/* Coverage mapping format version (start from 0). */
-#define INSTR_PROF_COVMAP_VERSION 5
-
-/* The profile version is always of type uint64_t. Reserve the upper 8 bits in
- * the version for other variants of the profile. We set the lowest bit of the
- * upper 8 bits (i.e. bit 56) to 1 to indicate an IR-level instrumentation
- * generated profile, and to 0 for a Clang FE generated profile.
- * A 1 in bit 57 indicates there are context-sensitive records in the profile.
- */
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
-#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
-#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
-#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
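-/* Usage sketch (added for illustration): given a version word V from a raw
- * profile header,
- *   GET_VERSION(V)               yields the raw format version, e.g. 8;
- *   (V & VARIANT_MASK_IR_PROF)   is non-zero for IR-level instrumentation;
- *   (V & VARIANT_MASK_CSIR_PROF) is non-zero for context-sensitive records.
- */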
-#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
-#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
-#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
-
-/* The variable that holds the name of the profile data
- * specified via the command line. */
-#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
-
-/* section name strings common to all targets other
-   than WIN32 */
-#define INSTR_PROF_DATA_COMMON __llvm_prf_data
-#define INSTR_PROF_NAME_COMMON __llvm_prf_names
-#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
-#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
-#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
-#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
-#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
-/* Windows section names. Because these section names contain dollar characters,
- * they must be quoted.
- */
-#define INSTR_PROF_DATA_COFF ".lprfd$M"
-#define INSTR_PROF_NAME_COFF ".lprfn$M"
-#define INSTR_PROF_CNTS_COFF ".lprfc$M"
-#define INSTR_PROF_VALS_COFF ".lprfv$M"
-#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
-#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
-#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
-#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
-
-#ifdef _WIN32
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
-#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
-#else
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
-/* Order file instrumentation. */
-#define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
-#endif
-
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR                               \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
-
-/* Macros to define the start/stop section symbols for a given
- * section on Linux. For instance,
- * INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON) will
- * expand to __start___llvm_prf_data.
- */
-#define INSTR_PROF_SECT_START(Sect) \
-        INSTR_PROF_CONCAT(__start_,Sect)
-#define INSTR_PROF_SECT_STOP(Sect) \
-        INSTR_PROF_CONCAT(__stop_,Sect)
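-
-/* Illustrative sketch (not from the original header): on Linux the runtime
- * can walk every per-function data record between the linker-provided bounds,
- * e.g.
- *
- *   extern __llvm_profile_data
- *       INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON),
- *       INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON);
- */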
-
-/* Value Profiling API linkage name.  */
-#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
-#define INSTR_PROF_VALUE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
-#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop
-#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC)
-
-/* InstrProfile per-function control data alignment.  */
-#define INSTR_PROF_DATA_ALIGNMENT 8
-
-/* The data structure that represents a value tracked by the
- * value profiler.
- */
-typedef struct InstrProfValueData {
-  /* Profiled value. */
-  uint64_t Value;
-  /* Number of times the value appears in the training run. */
-  uint64_t Count;
-} InstrProfValueData;
-
-#endif /* INSTR_PROF_DATA_INC */
-
-#ifndef INSTR_ORDER_FILE_INC
-/* The maximum number of functions: 128*1024 (the buffer size will be 128*4 KB). */
-#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
-#define INSTR_ORDER_FILE_BUFFER_BITS 17
-#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
-#endif /* INSTR_ORDER_FILE_INC */
-#else
-#undef INSTR_PROF_DATA_DEFINED
-#endif
-
-#undef COVMAP_V2_OR_V3
-
-#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API
-
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#else
-#define INSTR_PROF_INLINE
-#endif
-
-/* The value range buckets (22 buckets) for memop size value profiling look
- * like:
- *
- *   [0, 0]
- *   [1, 1]
- *   [2, 2]
- *   [3, 3]
- *   [4, 4]
- *   [5, 5]
- *   [6, 6]
- *   [7, 7]
- *   [8, 8]
- *   [9, 15]
- *   [16, 16]
- *   [17, 31]
- *   [32, 32]
- *   [33, 63]
- *   [64, 64]
- *   [65, 127]
- *   [128, 128]
- *   [129, 255]
- *   [256, 256]
- *   [257, 511]
- *   [512, 512]
- *   [513, UINT64_MAX]
- *
- * Each range has a 'representative value', which is the lower end value of the
- * range and is what is stored in the runtime profile data records and the VP
- * metadata. For example, it's 2 for [2, 2] and 65 for [65, 127].
- */
-#define INSTR_PROF_NUM_BUCKETS 22
-
-/*
- * Clz and Popcount. This code was copied from
- * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and
- * llvm/include/llvm/Support/MathExtras.h.
- */
-#if defined(_MSC_VER) && !defined(__clang__)
-
-#include <intrin.h>
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfClzll(unsigned long long X) {
-  unsigned long LeadZeroIdx = 0;
-#if !defined(_M_ARM64) && !defined(_M_X64)
-  // Scan the high 32 bits.
-  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32)))
-    return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset
-                                           // from the MSB.
-  // Scan the low 32 bits.
-  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X)))
-    return (int)(63 - LeadZeroIdx);
-#else
-  if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
-#endif
-  return 64;
-}
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfPopcountll(unsigned long long X) {
-  // This code originates from https://reviews.llvm.org/rG30626254510f.
-  unsigned long long v = X;
-  v = v - ((v >> 1) & 0x5555555555555555ULL);
-  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
-  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
-  return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56);
-}
-
-#else
-
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); }
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); }
-
-#endif  /* defined(_MSC_VER) && !defined(__clang__) */
-
-/* Map an (observed) memop size value to the representative value of its range.
- * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t
-InstrProfGetRangeRepValue(uint64_t Value) {
-  if (Value <= 8)
-    // The first ranges are individually tracked. Use the value as is.
-    return Value;
-  else if (Value >= 513)
-    // The last range is mapped to its lowest value.
-    return 513;
-  else if (InstProfPopcountll(Value) == 1)
-    // If it's a power of two, use it as is.
-    return Value;
-  else
-    // Otherwise, map to the previous power of two plus 1.
-    return (UINT64_C(1) << (64 - InstProfClzll(Value) - 1)) + 1;
-}
-
-/* Return true if the range that an (observed) memop size value belongs to has
- * only a single value in the range.  For example, 0 -> true, 8 -> true, 10 ->
- * false, 64 -> true, 100 -> false, 513 -> false. */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned
-InstrProfIsSingleValRange(uint64_t Value) {
-  if (Value <= 8)
-    // The first ranges are individually tracked.
-    return 1;
-  else if (InstProfPopcountll(Value) == 1)
-    // If it's a power of two, there's only one value.
-    return 1;
-  else
-    // Otherwise, there's more than one value in the range.
-    return 0;
-}
-
-#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */
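
The bucketing above is easiest to check with concrete values. Below is a
standalone C restatement of the mapping that InstrProfGetRangeRepValue
implements; the helper name range_rep and the sample values are illustrative
only, not part of the header:

    #include <assert.h>
    #include <stdint.h>

    /* Map a memop size to the representative value of its bucket,
     * mirroring InstrProfGetRangeRepValue above. */
    static uint64_t range_rep(uint64_t v) {
      if (v <= 8)
        return v;                     /* first buckets track values exactly */
      if (v >= 513)
        return 513;                   /* last, open-ended bucket */
      if ((v & (v - 1)) == 0)
        return v;                     /* exact power of two */
      uint64_t p = 1;                 /* otherwise: previous power of two + 1 */
      while ((p << 1) <= v)
        p <<= 1;
      return p + 1;
    }

    int main(void) {
      assert(range_rep(5) == 5);
      assert(range_rep(22) == 17);
      assert(range_rep(99) == 65);
      assert(range_rep(256) == 256);
      assert(range_rep(1001) == 513);
      return 0;
    }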
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
deleted file mode 100644
index d6209a3..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ /dev/null
@@ -1,159 +0,0 @@
-//===-- dfsan_interface.h -------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of DataFlowSanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef DFSAN_INTERFACE_H
-#define DFSAN_INTERFACE_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint8_t dfsan_label;
-typedef uint32_t dfsan_origin;
-
-/// Signature of the callback argument to dfsan_set_write_callback().
-typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
-
-/// Computes the union of \c l1 and \c l2, resulting in a union label.
-dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
-
-/// Sets the label for each address in [addr,addr+size) to \c label.
-void dfsan_set_label(dfsan_label label, void *addr, size_t size);
-
-/// Sets the label for each address in [addr,addr+size) to the union of the
-/// current label for that address and \c label.
-void dfsan_add_label(dfsan_label label, void *addr, size_t size);
-
-/// Retrieves the label associated with the given data.
-///
-/// The type of 'data' is arbitrary.  The function accepts a value of any type,
-/// which can be truncated or extended (implicitly or explicitly) as necessary.
-/// The truncation/extension operations will preserve the label of the original
-/// value.
-dfsan_label dfsan_get_label(long data);
-
-/// Retrieves the immediate origin associated with the given data. The returned
-/// origin may point to another origin.
-///
-/// The type of 'data' is arbitrary.
-dfsan_origin dfsan_get_origin(long data);
-
-/// Retrieves the label associated with the data at the given address.
-dfsan_label dfsan_read_label(const void *addr, size_t size);
-
-/// Returns whether the given label \c label contains the label \c elem.
-int dfsan_has_label(dfsan_label label, dfsan_label elem);
-
-/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory.  Use this call to restart the taint tracking
-/// within the same process.
-///
-/// Note: If another thread is working with tainted data during the flush, that
-/// taint could still be written to shadow after the flush.
-void dfsan_flush(void);
-
-/// Sets a callback to be invoked on calls to write().  The callback is invoked
-/// before the write is done.  The write is not guaranteed to succeed when the
-/// callback executes.  Pass in NULL to remove any callback.
-void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
-
-/// Interceptor hooks.
-/// Whenever one of dfsan's custom functions is called, the corresponding
-/// hook is called if it is non-zero. The hooks should be defined by the user.
-/// The primary use case is taint-guided fuzzing, where the fuzzer
-/// needs to see the parameters of the function and the labels.
-/// FIXME: implement more hooks.
-void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
-                            size_t n, dfsan_label s1_label,
-                            dfsan_label s2_label, dfsan_label n_label);
-void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
-                             size_t n, dfsan_label s1_label,
-                             dfsan_label s2_label, dfsan_label n_label);
-
-/// Prints the origin trace of the label at the address addr to stderr. It also
-/// prints the description at the beginning of the trace. If origin tracking is not
-/// on, or the address is not labeled, it prints nothing.
-void dfsan_print_origin_trace(const void *addr, const char *description);
-
-/// Prints the origin trace of the label at the address \p addr to a
-/// pre-allocated output buffer. If origin tracking is not on, or the address is
-/// not labeled, it prints nothing.
-///
-/// Typical usage:
-/// \code
-///   char kDescription[] = "...";
-///   char buf[1024];
-///   dfsan_sprint_origin_trace(&tainted_var, kDescription, buf, sizeof(buf));
-/// \endcode
-///
-/// Typical usage that handles truncation:
-/// \code
-///   char buf[1024];
-///   int len = dfsan_sprint_origin_trace(&var, nullptr, buf, sizeof(buf));
-///
-///   if (len < sizeof(buf)) {
-///     ProcessOriginTrace(buf);
-///   } else {
-///     char *tmpbuf = new char[len + 1];
-///     dfsan_sprint_origin_trace(&var, nullptr, tmpbuf, len + 1);
-///     ProcessOriginTrace(tmpbuf);
-///     delete[] tmpbuf;
-///   }
-/// \endcode
-///
-/// \param addr The tainted memory address whose origin we are printing.
-/// \param description A description printed at the beginning of the trace.
-/// \param [out] out_buf The output buffer to write the results to.
-/// \param out_buf_size The size of \p out_buf.
-///
-/// \returns The number of symbols that should have been written to \p out_buf
-/// (not including the trailing null byte '\0'). Thus, the string is truncated
-/// iff the return value is not less than \p out_buf_size.
-size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
-                                 char *out_buf, size_t out_buf_size);
-
-/// Prints the stack trace leading to this call to a pre-allocated output
-/// buffer.
-///
-/// For usage examples, see dfsan_sprint_origin_trace.
-///
-/// \param [out] out_buf The output buffer to write the results to.
-/// \param out_buf_size The size of \p out_buf.
-///
-/// \returns The number of symbols that should have been written to \p out_buf
-/// (not including the trailing null byte '\0'). Thus, the string is truncated
-/// iff the return value is not less than \p out_buf_size.
-size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
-
-/// Retrieves the very first origin associated with the data at the given
-/// address.
-dfsan_origin dfsan_get_init_origin(const void *addr);
-
-/// Returns the value of -dfsan-track-origins.
-/// * 0: do not track origins.
-/// * 1: track origins at memory store operations.
-/// * 2: track origins at memory load and store operations.
-int dfsan_get_track_origins(void);
-#ifdef __cplusplus
-}  // extern "C"
-
-template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
-  dfsan_set_label(label, (void *)&data, sizeof(T));
-}
-
-#endif
-
-#endif  // DFSAN_INTERFACE_H
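
A minimal usage sketch for the interface above, assuming a DFSan-instrumented
build (clang -fsanitize=dataflow, x86-64 Linux). Only functions declared in
this header are used; the variable names and the label value 1 are
illustrative:

    #include <assert.h>
    #include <sanitizer/dfsan_interface.h>

    int main(void) {
      int secret = 42;
      dfsan_label secret_label = 1;               /* user-chosen label bit */
      dfsan_set_label(secret_label, &secret, sizeof(secret));

      int derived = secret + 1;                   /* label propagates here */
      assert(dfsan_has_label(dfsan_get_label(derived), secret_label));
      return 0;
    }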
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdint.h b/linux-x86/lib64/clang/14.0.2/include/stdint.h
deleted file mode 100644
index 192f653..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/stdint.h
+++ /dev/null
@@ -1,693 +0,0 @@
-/*===---- stdint.h - Standard header for sized integer types --------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __CLANG_STDINT_H
-// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T
-// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
-// case the header guard macro is defined.
-#if !defined(_AIX) || !defined(_STD_TYPES_T) || !defined(__STDC_HOSTED__)
-#define __CLANG_STDINT_H
-#endif
-
-/* If we're hosted, fall back to the system's stdint.h, which might have
- * additional definitions.
- */
-#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
-
-// C99 7.18.3 Limits of other integer types
-//
-//  Footnote 219, 220: C++ implementations should define these macros only when
-//  __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
-//
-//  Footnote 222: C++ implementations should define these macros only when
-//  __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
-//
-// C++11 [cstdint.syn]p2:
-//
-//  The macros defined by <cstdint> are provided unconditionally. In particular,
-//  the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
-//  footnotes 219, 220, and 222 in the C standard) play no role in C++.
-//
-// C11 removed the problematic footnotes.
-//
-// Work around this inconsistency by always defining those macros in C++ mode,
-// so that a C library implementation which follows the C99 standard can be
-// used in C++.
-# ifdef __cplusplus
-#  if !defined(__STDC_LIMIT_MACROS)
-#   define __STDC_LIMIT_MACROS
-#   define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-#  endif
-#  if !defined(__STDC_CONSTANT_MACROS)
-#   define __STDC_CONSTANT_MACROS
-#   define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-#  endif
-# endif
-
-# include_next <stdint.h>
-
-# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-#  undef __STDC_LIMIT_MACROS
-#  undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
-# endif
-# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-#  undef __STDC_CONSTANT_MACROS
-#  undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
-# endif
-
-#else
-
-/* C99 7.18.1.1 Exact-width integer types.
- * C99 7.18.1.2 Minimum-width integer types.
- * C99 7.18.1.3 Fastest minimum-width integer types.
- *
- * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
- * 64-bit types if they are implemented. Other exact-width types are optional.
- * This implementation defines an exact-width type for every integer width
- * that is represented in the standard integer types.
- *
- * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
- * and 64-bit widths regardless of whether there are corresponding exact-width
- * types.
- *
- * To accommodate targets that are missing types that are exactly 8, 16, 32, or
- * 64 bits wide, this implementation takes an approach of cascading
- * redefinitions, redefining __int_leastN_t to successively smaller exact-width
- * types. It is therefore important that the types are defined in order of
- * descending widths.
- *
- * We currently assume that the minimum-width types and the fastest
- * minimum-width types are the same. This is allowed by the standard, but is
- * suboptimal.
- *
- * In violation of the standard, some targets do not implement a type that is
- * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
- * To accommodate these targets, a required minimum-width type is only
- * defined if there exists an exact-width type of equal or greater width.
- */
-
-#ifdef __INT64_TYPE__
-# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/
-typedef __INT64_TYPE__ int64_t;
-# endif /* __int8_t_defined */
-typedef __UINT64_TYPE__ uint64_t;
-# define __int_least64_t int64_t
-# define __uint_least64_t uint64_t
-# define __int_least32_t int64_t
-# define __uint_least32_t uint64_t
-# define __int_least16_t int64_t
-# define __uint_least16_t uint64_t
-# define __int_least8_t int64_t
-# define __uint_least8_t uint64_t
-#endif /* __INT64_TYPE__ */
-
-#ifdef __int_least64_t
-typedef __int_least64_t int_least64_t;
-typedef __uint_least64_t uint_least64_t;
-typedef __int_least64_t int_fast64_t;
-typedef __uint_least64_t uint_fast64_t;
-#endif /* __int_least64_t */
-
-#ifdef __INT56_TYPE__
-typedef __INT56_TYPE__ int56_t;
-typedef __UINT56_TYPE__ uint56_t;
-typedef int56_t int_least56_t;
-typedef uint56_t uint_least56_t;
-typedef int56_t int_fast56_t;
-typedef uint56_t uint_fast56_t;
-# define __int_least32_t int56_t
-# define __uint_least32_t uint56_t
-# define __int_least16_t int56_t
-# define __uint_least16_t uint56_t
-# define __int_least8_t int56_t
-# define __uint_least8_t uint56_t
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-typedef __INT48_TYPE__ int48_t;
-typedef __UINT48_TYPE__ uint48_t;
-typedef int48_t int_least48_t;
-typedef uint48_t uint_least48_t;
-typedef int48_t int_fast48_t;
-typedef uint48_t uint_fast48_t;
-# define __int_least32_t int48_t
-# define __uint_least32_t uint48_t
-# define __int_least16_t int48_t
-# define __uint_least16_t uint48_t
-# define __int_least8_t int48_t
-# define __uint_least8_t uint48_t
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-typedef __INT40_TYPE__ int40_t;
-typedef __UINT40_TYPE__ uint40_t;
-typedef int40_t int_least40_t;
-typedef uint40_t uint_least40_t;
-typedef int40_t int_fast40_t;
-typedef uint40_t uint_fast40_t;
-# define __int_least32_t int40_t
-# define __uint_least32_t uint40_t
-# define __int_least16_t int40_t
-# define __uint_least16_t uint40_t
-# define __int_least8_t int40_t
-# define __uint_least8_t uint40_t
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-
-# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/
-typedef __INT32_TYPE__ int32_t;
-# endif /* __int8_t_defined */
-
-# ifndef __uint32_t_defined  /* more glibc compatibility */
-# define __uint32_t_defined
-typedef __UINT32_TYPE__ uint32_t;
-# endif /* __uint32_t_defined */
-
-# define __int_least32_t int32_t
-# define __uint_least32_t uint32_t
-# define __int_least16_t int32_t
-# define __uint_least16_t uint32_t
-# define __int_least8_t int32_t
-# define __uint_least8_t uint32_t
-#endif /* __INT32_TYPE__ */
-
-#ifdef __int_least32_t
-typedef __int_least32_t int_least32_t;
-typedef __uint_least32_t uint_least32_t;
-typedef __int_least32_t int_fast32_t;
-typedef __uint_least32_t uint_fast32_t;
-#endif /* __int_least32_t */
-
-#ifdef __INT24_TYPE__
-typedef __INT24_TYPE__ int24_t;
-typedef __UINT24_TYPE__ uint24_t;
-typedef int24_t int_least24_t;
-typedef uint24_t uint_least24_t;
-typedef int24_t int_fast24_t;
-typedef uint24_t uint_fast24_t;
-# define __int_least16_t int24_t
-# define __uint_least16_t uint24_t
-# define __int_least8_t int24_t
-# define __uint_least8_t uint24_t
-#endif /* __INT24_TYPE__ */
-
-#ifdef __INT16_TYPE__
-#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/
-typedef __INT16_TYPE__ int16_t;
-#endif /* __int8_t_defined */
-typedef __UINT16_TYPE__ uint16_t;
-# define __int_least16_t int16_t
-# define __uint_least16_t uint16_t
-# define __int_least8_t int16_t
-# define __uint_least8_t uint16_t
-#endif /* __INT16_TYPE__ */
-
-#ifdef __int_least16_t
-typedef __int_least16_t int_least16_t;
-typedef __uint_least16_t uint_least16_t;
-typedef __int_least16_t int_fast16_t;
-typedef __uint_least16_t uint_fast16_t;
-#endif /* __int_least16_t */
-
-
-#ifdef __INT8_TYPE__
-#ifndef __int8_t_defined  /* glibc sys/types.h also defines int8_t*/
-typedef __INT8_TYPE__ int8_t;
-#endif /* __int8_t_defined */
-typedef __UINT8_TYPE__ uint8_t;
-# define __int_least8_t int8_t
-# define __uint_least8_t uint8_t
-#endif /* __INT8_TYPE__ */
-
-#ifdef __int_least8_t
-typedef __int_least8_t int_least8_t;
-typedef __uint_least8_t uint_least8_t;
-typedef __int_least8_t int_fast8_t;
-typedef __uint_least8_t uint_fast8_t;
-#endif /* __int_least8_t */
-
-/* prevent glibc sys/types.h from defining conflicting types */
-#ifndef __int8_t_defined
-# define __int8_t_defined
-#endif /* __int8_t_defined */
-
-/* C99 7.18.1.4 Integer types capable of holding object pointers.
- */
-#define __stdint_join3(a,b,c) a ## b ## c
-
-#ifndef _INTPTR_T
-#ifndef __intptr_t_defined
-typedef __INTPTR_TYPE__ intptr_t;
-#define __intptr_t_defined
-#define _INTPTR_T
-#endif
-#endif
-
-#ifndef _UINTPTR_T
-typedef __UINTPTR_TYPE__ uintptr_t;
-#define _UINTPTR_T
-#endif
-
-/* C99 7.18.1.5 Greatest-width integer types.
- */
-typedef __INTMAX_TYPE__  intmax_t;
-typedef __UINTMAX_TYPE__ uintmax_t;
-
-/* C99 7.18.4 Macros for minimum-width integer constants.
- *
- * The standard requires that integer constant macros be defined for all the
- * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
- * types are required, the corresponding integer constant macros are defined
- * here. This implementation also defines minimum-width types for every other
- * integer width that the target implements, so corresponding macros are
- * defined below, too.
- *
- * These macros are defined using the same successive-shrinking approach as
- * the type definitions above. It is likewise important that macros are defined
- * in order of descending width.
- *
- * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
- * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
- */
-
-#define __int_c_join(a, b) a ## b
-#define __int_c(v, suffix) __int_c_join(v, suffix)
-#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
-
-
-#ifdef __INT64_TYPE__
-# ifdef __INT64_C_SUFFIX__
-#  define __int64_c_suffix __INT64_C_SUFFIX__
-#  define __int32_c_suffix __INT64_C_SUFFIX__
-#  define __int16_c_suffix __INT64_C_SUFFIX__
-#  define  __int8_c_suffix __INT64_C_SUFFIX__
-# else
-#  undef __int64_c_suffix
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT64_C_SUFFIX__ */
-#endif /* __INT64_TYPE__ */
-
-#ifdef __int_least64_t
-# ifdef __int64_c_suffix
-#  define INT64_C(v) __int_c(v, __int64_c_suffix)
-#  define UINT64_C(v) __uint_c(v, __int64_c_suffix)
-# else
-#  define INT64_C(v) v
-#  define UINT64_C(v) v ## U
-# endif /* __int64_c_suffix */
-#endif /* __int_least64_t */
-
-
-#ifdef __INT56_TYPE__
-# ifdef __INT56_C_SUFFIX__
-#  define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
-#  define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
-#  define __int32_c_suffix __INT56_C_SUFFIX__
-#  define __int16_c_suffix __INT56_C_SUFFIX__
-#  define __int8_c_suffix  __INT56_C_SUFFIX__
-# else
-#  define INT56_C(v) v
-#  define UINT56_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT56_C_SUFFIX__ */
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-# ifdef __INT48_C_SUFFIX__
-#  define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
-#  define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
-#  define __int32_c_suffix __INT48_C_SUFFIX__
-#  define __int16_c_suffix __INT48_C_SUFFIX__
-#  define __int8_c_suffix  __INT48_C_SUFFIX__
-# else
-#  define INT48_C(v) v
-#  define UINT48_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT48_C_SUFFIX__ */
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-# ifdef __INT40_C_SUFFIX__
-#  define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
-#  define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
-#  define __int32_c_suffix __INT40_C_SUFFIX__
-#  define __int16_c_suffix __INT40_C_SUFFIX__
-#  define __int8_c_suffix  __INT40_C_SUFFIX__
-# else
-#  define INT40_C(v) v
-#  define UINT40_C(v) v ## U
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT40_C_SUFFIX__ */
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-# ifdef __INT32_C_SUFFIX__
-#  define __int32_c_suffix __INT32_C_SUFFIX__
-#  define __int16_c_suffix __INT32_C_SUFFIX__
-#  define __int8_c_suffix  __INT32_C_SUFFIX__
-#else
-#  undef __int32_c_suffix
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT32_C_SUFFIX__ */
-#endif /* __INT32_TYPE__ */
-
-#ifdef __int_least32_t
-# ifdef __int32_c_suffix
-#  define INT32_C(v) __int_c(v, __int32_c_suffix)
-#  define UINT32_C(v) __uint_c(v, __int32_c_suffix)
-# else
-#  define INT32_C(v) v
-#  define UINT32_C(v) v ## U
-# endif /* __int32_c_suffix */
-#endif /* __int_least32_t */
-
-
-#ifdef __INT24_TYPE__
-# ifdef __INT24_C_SUFFIX__
-#  define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
-#  define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
-#  define __int16_c_suffix __INT24_C_SUFFIX__
-#  define __int8_c_suffix  __INT24_C_SUFFIX__
-# else
-#  define INT24_C(v) v
-#  define UINT24_C(v) v ## U
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT24_C_SUFFIX__ */
-#endif /* __INT24_TYPE__ */
-
-
-#ifdef __INT16_TYPE__
-# ifdef __INT16_C_SUFFIX__
-#  define __int16_c_suffix __INT16_C_SUFFIX__
-#  define __int8_c_suffix  __INT16_C_SUFFIX__
-#else
-#  undef __int16_c_suffix
-#  undef  __int8_c_suffix
-# endif /* __INT16_C_SUFFIX__ */
-#endif /* __INT16_TYPE__ */
-
-#ifdef __int_least16_t
-# ifdef __int16_c_suffix
-#  define INT16_C(v) __int_c(v, __int16_c_suffix)
-#  define UINT16_C(v) __uint_c(v, __int16_c_suffix)
-# else
-#  define INT16_C(v) v
-#  define UINT16_C(v) v ## U
-# endif /* __int16_c_suffix */
-#endif /* __int_least16_t */
-
-
-#ifdef __INT8_TYPE__
-# ifdef __INT8_C_SUFFIX__
-#  define __int8_c_suffix __INT8_C_SUFFIX__
-#else
-#  undef  __int8_c_suffix
-# endif /* __INT8_C_SUFFIX__ */
-#endif /* __INT8_TYPE__ */
-
-#ifdef __int_least8_t
-# ifdef __int8_c_suffix
-#  define INT8_C(v) __int_c(v, __int8_c_suffix)
-#  define UINT8_C(v) __uint_c(v, __int8_c_suffix)
-# else
-#  define INT8_C(v) v
-#  define UINT8_C(v) v ## U
-# endif /* __int8_c_suffix */
-#endif /* __int_least8_t */
-
-
-/* C99 7.18.2.1 Limits of exact-width integer types.
- * C99 7.18.2.2 Limits of minimum-width integer types.
- * C99 7.18.2.3 Limits of fastest minimum-width integer types.
- *
- * The presence of limit macros is completely optional in C99.  This
- * implementation defines limits for all of the types (exact- and
- * minimum-width) that it defines above, using the limits of the minimum-width
- * type for any types that do not have exact-width representations.
- *
- * As in the type definitions, this section takes an approach of
- * successive-shrinking to determine which limits to use for the standard (8,
- * 16, 32, 64) bit widths when they don't have exact representations. It is
- * therefore important that the definitions be kept in order of descending
- * widths.
- *
- * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
- * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
- */
-
-#ifdef __INT64_TYPE__
-# define INT64_MAX           INT64_C( 9223372036854775807)
-# define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
-# define UINT64_MAX         UINT64_C(18446744073709551615)
-# define __INT_LEAST64_MIN   INT64_MIN
-# define __INT_LEAST64_MAX   INT64_MAX
-# define __UINT_LEAST64_MAX UINT64_MAX
-# define __INT_LEAST32_MIN   INT64_MIN
-# define __INT_LEAST32_MAX   INT64_MAX
-# define __UINT_LEAST32_MAX UINT64_MAX
-# define __INT_LEAST16_MIN   INT64_MIN
-# define __INT_LEAST16_MAX   INT64_MAX
-# define __UINT_LEAST16_MAX UINT64_MAX
-# define __INT_LEAST8_MIN    INT64_MIN
-# define __INT_LEAST8_MAX    INT64_MAX
-# define __UINT_LEAST8_MAX  UINT64_MAX
-#endif /* __INT64_TYPE__ */
-
-#ifdef __INT_LEAST64_MIN
-# define INT_LEAST64_MIN   __INT_LEAST64_MIN
-# define INT_LEAST64_MAX   __INT_LEAST64_MAX
-# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
-# define INT_FAST64_MIN    __INT_LEAST64_MIN
-# define INT_FAST64_MAX    __INT_LEAST64_MAX
-# define UINT_FAST64_MAX  __UINT_LEAST64_MAX
-#endif /* __INT_LEAST64_MIN */
-
-
-#ifdef __INT56_TYPE__
-# define INT56_MAX           INT56_C(36028797018963967)
-# define INT56_MIN         (-INT56_C(36028797018963967)-1)
-# define UINT56_MAX         UINT56_C(72057594037927935)
-# define INT_LEAST56_MIN     INT56_MIN
-# define INT_LEAST56_MAX     INT56_MAX
-# define UINT_LEAST56_MAX   UINT56_MAX
-# define INT_FAST56_MIN      INT56_MIN
-# define INT_FAST56_MAX      INT56_MAX
-# define UINT_FAST56_MAX    UINT56_MAX
-# define __INT_LEAST32_MIN   INT56_MIN
-# define __INT_LEAST32_MAX   INT56_MAX
-# define __UINT_LEAST32_MAX UINT56_MAX
-# define __INT_LEAST16_MIN   INT56_MIN
-# define __INT_LEAST16_MAX   INT56_MAX
-# define __UINT_LEAST16_MAX UINT56_MAX
-# define __INT_LEAST8_MIN    INT56_MIN
-# define __INT_LEAST8_MAX    INT56_MAX
-# define __UINT_LEAST8_MAX  UINT56_MAX
-#endif /* __INT56_TYPE__ */
-
-
-#ifdef __INT48_TYPE__
-# define INT48_MAX           INT48_C(140737488355327)
-# define INT48_MIN         (-INT48_C(140737488355327)-1)
-# define UINT48_MAX         UINT48_C(281474976710655)
-# define INT_LEAST48_MIN     INT48_MIN
-# define INT_LEAST48_MAX     INT48_MAX
-# define UINT_LEAST48_MAX   UINT48_MAX
-# define INT_FAST48_MIN      INT48_MIN
-# define INT_FAST48_MAX      INT48_MAX
-# define UINT_FAST48_MAX    UINT48_MAX
-# define __INT_LEAST32_MIN   INT48_MIN
-# define __INT_LEAST32_MAX   INT48_MAX
-# define __UINT_LEAST32_MAX UINT48_MAX
-# define __INT_LEAST16_MIN   INT48_MIN
-# define __INT_LEAST16_MAX   INT48_MAX
-# define __UINT_LEAST16_MAX UINT48_MAX
-# define __INT_LEAST8_MIN    INT48_MIN
-# define __INT_LEAST8_MAX    INT48_MAX
-# define __UINT_LEAST8_MAX  UINT48_MAX
-#endif /* __INT48_TYPE__ */
-
-
-#ifdef __INT40_TYPE__
-# define INT40_MAX           INT40_C(549755813887)
-# define INT40_MIN         (-INT40_C(549755813887)-1)
-# define UINT40_MAX         UINT40_C(1099511627775)
-# define INT_LEAST40_MIN     INT40_MIN
-# define INT_LEAST40_MAX     INT40_MAX
-# define UINT_LEAST40_MAX   UINT40_MAX
-# define INT_FAST40_MIN      INT40_MIN
-# define INT_FAST40_MAX      INT40_MAX
-# define UINT_FAST40_MAX    UINT40_MAX
-# define __INT_LEAST32_MIN   INT40_MIN
-# define __INT_LEAST32_MAX   INT40_MAX
-# define __UINT_LEAST32_MAX UINT40_MAX
-# define __INT_LEAST16_MIN   INT40_MIN
-# define __INT_LEAST16_MAX   INT40_MAX
-# define __UINT_LEAST16_MAX UINT40_MAX
-# define __INT_LEAST8_MIN    INT40_MIN
-# define __INT_LEAST8_MAX    INT40_MAX
-# define __UINT_LEAST8_MAX  UINT40_MAX
-#endif /* __INT40_TYPE__ */
-
-
-#ifdef __INT32_TYPE__
-# define INT32_MAX           INT32_C(2147483647)
-# define INT32_MIN         (-INT32_C(2147483647)-1)
-# define UINT32_MAX         UINT32_C(4294967295)
-# define __INT_LEAST32_MIN   INT32_MIN
-# define __INT_LEAST32_MAX   INT32_MAX
-# define __UINT_LEAST32_MAX UINT32_MAX
-# define __INT_LEAST16_MIN   INT32_MIN
-# define __INT_LEAST16_MAX   INT32_MAX
-# define __UINT_LEAST16_MAX UINT32_MAX
-# define __INT_LEAST8_MIN    INT32_MIN
-# define __INT_LEAST8_MAX    INT32_MAX
-# define __UINT_LEAST8_MAX  UINT32_MAX
-#endif /* __INT32_TYPE__ */
-
-#ifdef __INT_LEAST32_MIN
-# define INT_LEAST32_MIN   __INT_LEAST32_MIN
-# define INT_LEAST32_MAX   __INT_LEAST32_MAX
-# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
-# define INT_FAST32_MIN    __INT_LEAST32_MIN
-# define INT_FAST32_MAX    __INT_LEAST32_MAX
-# define UINT_FAST32_MAX  __UINT_LEAST32_MAX
-#endif /* __INT_LEAST32_MIN */
-
-
-#ifdef __INT24_TYPE__
-# define INT24_MAX           INT24_C(8388607)
-# define INT24_MIN         (-INT24_C(8388607)-1)
-# define UINT24_MAX         UINT24_C(16777215)
-# define INT_LEAST24_MIN     INT24_MIN
-# define INT_LEAST24_MAX     INT24_MAX
-# define UINT_LEAST24_MAX   UINT24_MAX
-# define INT_FAST24_MIN      INT24_MIN
-# define INT_FAST24_MAX      INT24_MAX
-# define UINT_FAST24_MAX    UINT24_MAX
-# define __INT_LEAST16_MIN   INT24_MIN
-# define __INT_LEAST16_MAX   INT24_MAX
-# define __UINT_LEAST16_MAX UINT24_MAX
-# define __INT_LEAST8_MIN    INT24_MIN
-# define __INT_LEAST8_MAX    INT24_MAX
-# define __UINT_LEAST8_MAX  UINT24_MAX
-#endif /* __INT24_TYPE__ */
-
-
-#ifdef __INT16_TYPE__
-#define INT16_MAX            INT16_C(32767)
-#define INT16_MIN          (-INT16_C(32767)-1)
-#define UINT16_MAX          UINT16_C(65535)
-# define __INT_LEAST16_MIN   INT16_MIN
-# define __INT_LEAST16_MAX   INT16_MAX
-# define __UINT_LEAST16_MAX UINT16_MAX
-# define __INT_LEAST8_MIN    INT16_MIN
-# define __INT_LEAST8_MAX    INT16_MAX
-# define __UINT_LEAST8_MAX  UINT16_MAX
-#endif /* __INT16_TYPE__ */
-
-#ifdef __INT_LEAST16_MIN
-# define INT_LEAST16_MIN   __INT_LEAST16_MIN
-# define INT_LEAST16_MAX   __INT_LEAST16_MAX
-# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
-# define INT_FAST16_MIN    __INT_LEAST16_MIN
-# define INT_FAST16_MAX    __INT_LEAST16_MAX
-# define UINT_FAST16_MAX  __UINT_LEAST16_MAX
-#endif /* __INT_LEAST16_MIN */
-
-
-#ifdef __INT8_TYPE__
-# define INT8_MAX            INT8_C(127)
-# define INT8_MIN          (-INT8_C(127)-1)
-# define UINT8_MAX          UINT8_C(255)
-# define __INT_LEAST8_MIN    INT8_MIN
-# define __INT_LEAST8_MAX    INT8_MAX
-# define __UINT_LEAST8_MAX  UINT8_MAX
-#endif /* __INT8_TYPE__ */
-
-#ifdef __INT_LEAST8_MIN
-# define INT_LEAST8_MIN   __INT_LEAST8_MIN
-# define INT_LEAST8_MAX   __INT_LEAST8_MAX
-# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
-# define INT_FAST8_MIN    __INT_LEAST8_MIN
-# define INT_FAST8_MAX    __INT_LEAST8_MAX
-# define UINT_FAST8_MAX  __UINT_LEAST8_MAX
-#endif /* __INT_LEAST8_MIN */
-
-/* Some utility macros */
-#define  __INTN_MIN(n)  __stdint_join3( INT, n, _MIN)
-#define  __INTN_MAX(n)  __stdint_join3( INT, n, _MAX)
-#define __UINTN_MAX(n)  __stdint_join3(UINT, n, _MAX)
-#define  __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
-#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
-
-/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
-/* C99 7.18.3 Limits of other integer types. */
-
-#define  INTPTR_MIN  (-__INTPTR_MAX__-1)
-#define  INTPTR_MAX    __INTPTR_MAX__
-#define UINTPTR_MAX   __UINTPTR_MAX__
-#define PTRDIFF_MIN (-__PTRDIFF_MAX__-1)
-#define PTRDIFF_MAX   __PTRDIFF_MAX__
-#define    SIZE_MAX      __SIZE_MAX__
-
-/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
- * is enabled. */
-#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
-#define   RSIZE_MAX            (SIZE_MAX >> 1)
-#endif
-
-/* C99 7.18.2.5 Limits of greatest-width integer types. */
-#define  INTMAX_MIN (-__INTMAX_MAX__-1)
-#define  INTMAX_MAX   __INTMAX_MAX__
-#define UINTMAX_MAX  __UINTMAX_MAX__
-
-/* C99 7.18.3 Limits of other integer types. */
-#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
-#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
-#ifdef __WINT_UNSIGNED__
-# define WINT_MIN       __UINTN_C(__WINT_WIDTH__, 0)
-# define WINT_MAX       __UINTN_MAX(__WINT_WIDTH__)
-#else
-# define WINT_MIN       __INTN_MIN(__WINT_WIDTH__)
-# define WINT_MAX       __INTN_MAX(__WINT_WIDTH__)
-#endif
-
-#ifndef WCHAR_MAX
-# define WCHAR_MAX __WCHAR_MAX__
-#endif
-#ifndef WCHAR_MIN
-# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
-#  define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
-# else
-#  define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
-# endif
-#endif
-
-/* 7.18.4.2 Macros for greatest-width integer constants. */
-#define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
-#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
-
-#endif /* __STDC_HOSTED__ */
-#endif /* __CLANG_STDINT_H */
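
A short usage sketch of the constant and limit macros this header provides;
everything referenced is standard <stdint.h>/<inttypes.h>, and the program
itself is illustrative only:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* UINT64_C attaches the suffix the target needs, so the literal is
       * well-formed even where plain int is narrower than 64 bits. */
      uint64_t big = UINT64_C(18446744073709551615);
      printf("UINT64_MAX      = %" PRIu64 "\n", big);
      printf("INT32_MIN       = %" PRId32 "\n", INT32_MIN);
      printf("INT_LEAST16_MAX = %d\n", (int)INT_LEAST16_MAX);
      return 0;
    }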
diff --git a/linux-x86/lib64/clang/14.0.2/include/unwind.h b/linux-x86/lib64/clang/14.0.2/include/unwind.h
deleted file mode 100644
index 029524b..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/unwind.h
+++ /dev/null
@@ -1,327 +0,0 @@
-/*===---- unwind.h - Stack unwinding ----------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
-
-#ifndef __CLANG_UNWIND_H
-#define __CLANG_UNWIND_H
-
-#if defined(__APPLE__) && __has_include_next(<unwind.h>)
-/* Darwin (from 11.x on) provides an unwind.h. If that's available,
- * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
- * so define that around the include.*/
-# ifndef _GNU_SOURCE
-#  define _SHOULD_UNDEFINE_GNU_SOURCE
-#  define _GNU_SOURCE
-# endif
-// libunwind's unwind.h reflects the current visibility.  However, Mozilla
-// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
-// visibility to default and export its contents.  gcc also allows users to
-// override that behavior by #defining HIDE_EXPORTS (but note, this only obeys
-// the user's -fvisibility setting; it doesn't hide any exports on its own).  We
-// imitate gcc's header here:
-# ifdef HIDE_EXPORTS
-#  include_next <unwind.h>
-# else
-#  pragma GCC visibility push(default)
-#  include_next <unwind.h>
-#  pragma GCC visibility pop
-# endif
-# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
-#  undef _GNU_SOURCE
-#  undef _SHOULD_UNDEFINE_GNU_SOURCE
-# endif
-#else
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* It is a bit strange for a header to play with the visibility of the
-   symbols it declares, but this matches gcc's behavior and some programs
-   depend on it. */
-#ifndef HIDE_EXPORTS
-#pragma GCC visibility push(default)
-#endif
-
-typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__)));
-typedef intptr_t _Unwind_Sword __attribute__((__mode__(__unwind_word__)));
-typedef uintptr_t _Unwind_Ptr;
-typedef uintptr_t _Unwind_Internal_Ptr;
-typedef uint64_t _Unwind_Exception_Class;
-
-typedef intptr_t _sleb128_t;
-typedef uintptr_t _uleb128_t;
-
-struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-struct _Unwind_Control_Block;
-typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
-#else
-struct _Unwind_Exception;
-typedef struct _Unwind_Exception _Unwind_Exception;
-#endif
-typedef enum {
-  _URC_NO_REASON = 0,
-#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
-  _URC_OK = 0, /* used by ARM EHABI */
-#endif
-  _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
-
-  _URC_FATAL_PHASE2_ERROR = 2,
-  _URC_FATAL_PHASE1_ERROR = 3,
-  _URC_NORMAL_STOP = 4,
-
-  _URC_END_OF_STACK = 5,
-  _URC_HANDLER_FOUND = 6,
-  _URC_INSTALL_CONTEXT = 7,
-  _URC_CONTINUE_UNWIND = 8,
-#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
-    !defined(__ARM_DWARF_EH__)
-  _URC_FAILURE = 9 /* used by ARM EHABI */
-#endif
-} _Unwind_Reason_Code;
-
-typedef enum {
-  _UA_SEARCH_PHASE = 1,
-  _UA_CLEANUP_PHASE = 2,
-
-  _UA_HANDLER_FRAME = 4,
-  _UA_FORCE_UNWIND = 8,
-  _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
-} _Unwind_Action;
-
-typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
-                                             _Unwind_Exception *);
-
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-typedef struct _Unwind_Control_Block _Unwind_Control_Block;
-typedef uint32_t _Unwind_EHT_Header;
-
-struct _Unwind_Control_Block {
-  uint64_t exception_class;
-  void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
-  /* unwinder cache (private fields for the unwinder's use) */
-  struct {
-    uint32_t reserved1; /* forced unwind stop function, 0 if not forced */
-    uint32_t reserved2; /* personality routine */
-    uint32_t reserved3; /* callsite */
-    uint32_t reserved4; /* forced unwind stop argument */
-    uint32_t reserved5;
-  } unwinder_cache;
-  /* propagation barrier cache (valid after phase 1) */
-  struct {
-    uint32_t sp;
-    uint32_t bitpattern[5];
-  } barrier_cache;
-  /* cleanup cache (preserved over cleanup) */
-  struct {
-    uint32_t bitpattern[4];
-  } cleanup_cache;
-  /* personality cache (for personality's benefit) */
-  struct {
-    uint32_t fnstart;         /* function start address */
-    _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
-    uint32_t additional;      /* additional data */
-    uint32_t reserved1;
-  } pr_cache;
-  long long int : 0; /* force alignment of next item to 8-byte boundary */
-} __attribute__((__aligned__(8)));
-#else
-struct _Unwind_Exception {
-  _Unwind_Exception_Class exception_class;
-  _Unwind_Exception_Cleanup_Fn exception_cleanup;
-#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__)
-  _Unwind_Word private_[6];
-#else
-  _Unwind_Word private_1;
-  _Unwind_Word private_2;
-#endif
-  /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
-   * aligned".  GCC has interpreted this to mean "use the maximum useful
-   * alignment for the target"; so do we. */
-} __attribute__((__aligned__));
-#endif
-
-typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
-                                               _Unwind_Exception_Class,
-                                               _Unwind_Exception *,
-                                               struct _Unwind_Context *,
-                                               void *);
-
-typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(int, _Unwind_Action,
-                                                      _Unwind_Exception_Class,
-                                                      _Unwind_Exception *,
-                                                      struct _Unwind_Context *);
-typedef _Unwind_Personality_Fn __personality_routine;
-
-typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
-                                                void *);
-
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
-typedef enum {
-  _UVRSC_CORE = 0,        /* integer register */
-  _UVRSC_VFP = 1,         /* vfp */
-  _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
-} _Unwind_VRS_RegClass;
-
-typedef enum {
-  _UVRSD_UINT32 = 0,
-  _UVRSD_VFPX = 1,
-  _UVRSD_UINT64 = 3,
-  _UVRSD_FLOAT = 4,
-  _UVRSD_DOUBLE = 5
-} _Unwind_VRS_DataRepresentation;
-
-typedef enum {
-  _UVRSR_OK = 0,
-  _UVRSR_NOT_IMPLEMENTED = 1,
-  _UVRSR_FAILED = 2
-} _Unwind_VRS_Result;
-
-typedef uint32_t _Unwind_State;
-#define _US_VIRTUAL_UNWIND_FRAME  ((_Unwind_State)0)
-#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)
-#define _US_UNWIND_FRAME_RESUME   ((_Unwind_State)2)
-#define _US_ACTION_MASK           ((_Unwind_State)3)
-#define _US_FORCE_UNWIND          ((_Unwind_State)8)
-
-_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
-  _Unwind_VRS_RegClass __regclass,
-  uint32_t __regno,
-  _Unwind_VRS_DataRepresentation __representation,
-  void *__valuep);
-
-_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
-  _Unwind_VRS_RegClass __regclass,
-  uint32_t __regno,
-  _Unwind_VRS_DataRepresentation __representation,
-  void *__valuep);
-
-static __inline__
-_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
-  _Unwind_Word __value;
-  _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
-  return __value;
-}
-
-static __inline__
-void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
-                   _Unwind_Word __value) {
-  _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
-}
-
-static __inline__
-_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
-  _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
-  return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
-}
-
-static __inline__
-void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
-  _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
-  _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
-}
-#else
-_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
-void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
-
-_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
-void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
-#endif
-
-
-_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
-
-_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
-
-_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
-
-void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
-
-_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
-
-/* DWARF EH functions; currently not available on Darwin/ARM */
-#if !defined(__APPLE__) || !defined(__arm__)
-_Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_ForcedUnwind(_Unwind_Exception *, _Unwind_Stop_Fn,
-                                         void *);
-void _Unwind_DeleteException(_Unwind_Exception *);
-void _Unwind_Resume(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(_Unwind_Exception *);
-
-#endif
-
-_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
-
-/* setjmp(3)/longjmp(3) stuff */
-typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
-
-void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
-void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
-_Unwind_Reason_Code _Unwind_SjLj_RaiseException(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(_Unwind_Exception *,
-                                              _Unwind_Stop_Fn, void *);
-void _Unwind_SjLj_Resume(_Unwind_Exception *);
-_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(_Unwind_Exception *);
-
-void *_Unwind_FindEnclosingFunction(void *);
-
-#ifdef __APPLE__
-
-_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
-    __attribute__((__unavailable__));
-_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
-    __attribute__((__unavailable__));
-
-/* Darwin-specific functions */
-void __register_frame(const void *);
-void __deregister_frame(const void *);
-
-struct dwarf_eh_bases {
-  uintptr_t tbase;
-  uintptr_t dbase;
-  uintptr_t func;
-};
-void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
-
-void __register_frame_info_bases(const void *, void *, void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_info(const void *, void *) __attribute__((__unavailable__));
-void __register_frame_info_table_bases(const void *, void*, void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_info_table(const void *, void *)
-  __attribute__((__unavailable__));
-void __register_frame_table(const void *) __attribute__((__unavailable__));
-void __deregister_frame_info(const void *) __attribute__((__unavailable__));
-void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__));
-
-#else
-
-_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
-_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
-
-#endif
-
-
-#ifndef HIDE_EXPORTS
-#pragma GCC visibility pop
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#endif /* __CLANG_UNWIND_H */
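
A minimal sketch of the portable API declared above: walking the current stack
with _Unwind_Backtrace and printing each frame's PC. The callback name is an
assumption for the example; unwind tables must be present (they are by default
on most Linux targets, or via -funwind-tables):

    #include <stdio.h>
    #include <unwind.h>

    static _Unwind_Reason_Code print_frame(struct _Unwind_Context *ctx,
                                           void *arg) {
      int *depth = (int *)arg;
      printf("#%d pc=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
      return _URC_NO_REASON;          /* keep unwinding */
    }

    int main(void) {
      int depth = 0;
      _Unwind_Backtrace(print_frame, &depth);
      return 0;
    }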
diff --git a/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h b/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h
deleted file mode 100644
index f3c0807..0000000
--- a/linux-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*===------------------ vaesintrin.h - VAES intrinsics ---------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <vaesintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __VAESINTRIN_H
-#define __VAESINTRIN_H
-
-/* Default attributes for YMM forms. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256)))
-
-/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes"), __min_vector_width__(512)))
-
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesenc_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesenc256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesdec_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesdec256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesenclast_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,
-              (__v4di) __B);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)
-{
-  return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,
-              (__v4di) __B);
-}
-
-#ifdef __AVX512FINTRIN_H
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenc_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesdec_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenclast_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,
-              (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesdeclast_epi128(__m512i __A, __m512i __B)
-{
-  return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A,
-              (__v8di) __B);
-}
-#endif // __AVX512FINTRIN_H
-
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_F
-
-#endif
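
A minimal usage sketch for the YMM forms above: one AES encryption round
applied to two 128-bit states at once. The build flags (clang -mavx2 -mvaes)
and input values are assumptions for the example, and running it requires a
VAES-capable CPU:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256i states = _mm256_set1_epi8(0x41);    /* two 128-bit AES states */
      __m256i rkey   = _mm256_set1_epi8(0x5a);    /* same round key per lane */
      __m256i out    = _mm256_aesenc_epi128(states, rkey);

      unsigned char bytes[32];
      _mm256_storeu_si256((__m256i *)bytes, out);
      printf("%02x %02x ...\n", bytes[0], bytes[1]);
      return 0;
    }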
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_builtin_vars.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_builtin_vars.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_cmath.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_complex_builtins.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_complex_builtins.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_device_functions.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_device_functions.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_intrinsics.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_intrinsics.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_libdevice_declares.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
copy to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
index 538556f..e447590 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_cuda_math.h
+++ b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_math.h
@@ -345,4 +345,4 @@
 #pragma pop_macro("__DEVICE_VOID__")
 #pragma pop_macro("__FAST_OR_SLOW")
 
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_math_forward_declares.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_math_forward_declares.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_runtime_wrapper.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_runtime_wrapper.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h b/linux-x86/lib64/clang/14.0.6/include/__clang_cuda_texture_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_cuda_texture_intrinsics.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_cuda_texture_intrinsics.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h b/linux-x86/lib64/clang/14.0.6/include/__clang_hip_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_cmath.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_hip_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h b/linux-x86/lib64/clang/14.0.6/include/__clang_hip_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_libdevice_declares.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_hip_libdevice_declares.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__clang_hip_math.h b/linux-x86/lib64/clang/14.0.6/include/__clang_hip_math.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__clang_hip_math.h
rename to linux-x86/lib64/clang/14.0.6/include/__clang_hip_math.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
similarity index 80%
copy from darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
copy to linux-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
index 73021d2..10cec58 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/__clang_hip_runtime_wrapper.h
+++ b/linux-x86/lib64/clang/14.0.6/include/__clang_hip_runtime_wrapper.h
@@ -50,6 +50,9 @@
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
 #else
 typedef __SIZE_TYPE__ size_t;
 // Define macros which are needed to declare HIP device APIs without standard
@@ -74,25 +77,35 @@
 extern "C" {
 #endif //__cplusplus
 
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+  return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#else  // HIP version check
 #if __HIP_ENABLE_DEVICE_MALLOC__
 __device__ void *__hip_malloc(__hip_size_t __size);
 __device__ void *__hip_free(void *__ptr);
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   return __hip_malloc(__size);
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
-  return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+  __hip_free(__ptr);
 }
 #else
 __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
   __builtin_trap();
   return (void *)0;
 }
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
   __builtin_trap();
-  return (void *)0;
 }
 #endif
+#endif // HIP version check
 
 #ifdef __cplusplus
 } // extern "C"
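
For context, a sketch of what this hunk enables: on HIP >= 4.5 device-side
malloc/free route to __ockl_dm_alloc/__ockl_dm_dealloc, and free now correctly
returns void. The kernel below is a made-up illustration, not part of the
wrapper:

    #include <hip/hip_runtime.h>

    __global__ void scratch_kernel(int *out) {
      int *tmp = (int *)malloc(4 * sizeof(int));  /* device-heap allocation */
      if (tmp) {
        int sum = 0;
        for (int i = 0; i < 4; ++i) {
          tmp[i] = i;
          sum += tmp[i];
        }
        *out = sum;
        free(tmp);                                /* matching device free */
      }
    }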
diff --git a/linux-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h b/linux-x86/lib64/clang/14.0.6/include/__stddef_max_align_t.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__stddef_max_align_t.h
rename to linux-x86/lib64/clang/14.0.6/include/__stddef_max_align_t.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h b/linux-x86/lib64/clang/14.0.6/include/__wmmintrin_aes.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__wmmintrin_aes.h
rename to linux-x86/lib64/clang/14.0.6/include/__wmmintrin_aes.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/14.0.6/include/__wmmintrin_pclmul.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/__wmmintrin_pclmul.h
rename to linux-x86/lib64/clang/14.0.6/include/__wmmintrin_pclmul.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/adxintrin.h b/linux-x86/lib64/clang/14.0.6/include/adxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/adxintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/adxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/altivec.h b/linux-x86/lib64/clang/14.0.6/include/altivec.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/altivec.h
rename to linux-x86/lib64/clang/14.0.6/include/altivec.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ammintrin.h b/linux-x86/lib64/clang/14.0.6/include/ammintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ammintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ammintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/amxintrin.h b/linux-x86/lib64/clang/14.0.6/include/amxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/amxintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/amxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm64intr.h b/linux-x86/lib64/clang/14.0.6/include/arm64intr.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm64intr.h
rename to linux-x86/lib64/clang/14.0.6/include/arm64intr.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_acle.h b/linux-x86/lib64/clang/14.0.6/include/arm_acle.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_acle.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_acle.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_bf16.h b/linux-x86/lib64/clang/14.0.6/include/arm_bf16.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_bf16.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_bf16.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_cde.h b/linux-x86/lib64/clang/14.0.6/include/arm_cde.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_cde.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_cde.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_cmse.h b/linux-x86/lib64/clang/14.0.6/include/arm_cmse.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_cmse.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_cmse.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_fp16.h b/linux-x86/lib64/clang/14.0.6/include/arm_fp16.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_fp16.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_fp16.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_mve.h b/linux-x86/lib64/clang/14.0.6/include/arm_mve.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_mve.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h b/linux-x86/lib64/clang/14.0.6/include/arm_neon.h
similarity index 90%
copy from darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
copy to linux-x86/lib64/clang/14.0.6/include/arm_neon.h
index 2448870..76fd7c0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/arm_neon.h
+++ b/linux-x86/lib64/clang/14.0.6/include/arm_neon.h
@@ -19090,11 +19090,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
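The helpers deleted above (and in the hunks that follow) are instances of arm_neon.h's big-endian support pattern: on big-endian targets each public intrinsic reverses its vector operands into little-endian lane order, calls a __noswap_ variant that does the real work, and reverses the result back. The __noswap_vqadd*/__noswap_vqsub* helpers become dead once vqrdmlah/vqrdmlsh stop being emulated through them, as the later hunks show. A hedged sketch of the convention, using a hypothetical vfoo_s32 (not a real intrinsic):

    /* Hypothetical illustration of the header's __noswap_ convention. */
    #ifdef __LITTLE_ENDIAN__
    __ai int32x4_t vfoo_s32(int32x4_t __p0) {
      return __noswap_vfoo_s32(__p0);              /* lanes already in LE order */
    }
    #else
    __ai int32x4_t vfoo_s32(int32x4_t __p0) {
      int32x4_t __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
      int32x4_t __ret = __noswap_vfoo_s32(__rev0); /* operate in LE lane order */
      return __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); /* back to BE */
    }
    #endif
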
@@ -19129,11 +19124,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -19224,11 +19214,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -19251,11 +19236,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21675,11 +21655,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21714,11 +21689,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -21809,11 +21779,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
 #endif
 
 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
@@ -21836,11 +21801,6 @@
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
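The hunks from here down change how the Armv8.1 QRDMX intrinsics are lowered: vqrdmlah/vqrdmlsh (and their q/lane forms) now call the dedicated __builtin_neon_vqrdmlahq_v / __builtin_neon_vqrdmlshq_v builtins — i.e. the SQRDMLAH/SQRDMLSH instructions — instead of the old vqadd(vqrdmulh(...)) / vqsub(vqrdmulh(...)) emulation. The two are not bit-identical: the emulation can saturate twice (inside vqrdmulh and again in vqadd), while the fused instruction rounds and saturates the full intermediate once. A reference model for one s32 lane, assuming the Armv8.1 SQRDMLAH pseudocode (sketch, not the compiler's lowering):

    #include <stdint.h>

    /* sat32(((acc << 32) + 2*b*c + 2^31) >> 32); __int128 keeps the
     * intermediate exact even at b == c == INT32_MIN. */
    static int32_t ref_sqrdmlah_s32(int32_t acc, int32_t b, int32_t c) {
      __int128 t = ((__int128)acc << 32) + 2 * (__int128)b * (__int128)c +
                   ((__int128)1 << 31);
      t >>= 32;
      if (t > INT32_MAX) return INT32_MAX;
      if (t < INT32_MIN) return INT32_MIN;
      return (int32_t)t;
    }

For example, with acc = INT32_MIN and b = c = INT32_MIN, the emulation yields -1 (vqrdmulh saturates to INT32_MAX before the add) while the fused form yields 0.
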
@@ -47053,7 +47013,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
   int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
   return __ret;
 }
 #else
@@ -47062,16 +47022,21 @@
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
   int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
   return __ret;
 }
 #else
@@ -47080,16 +47045,21 @@
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
   int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
   return __ret;
 }
 #else
@@ -47098,16 +47068,21 @@
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
   int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
+__ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
   int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
   return __ret;
 }
 #else
@@ -47116,10 +47091,15 @@
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -47128,7 +47108,7 @@
   int32x4_t __s1_309 = __p1_309; \
   int32x2_t __s2_309 = __p2_309; \
   int32x4_t __ret_309; \
-  __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \
+  __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \
   __ret_309; \
 })
 #else
@@ -47140,7 +47120,7 @@
   int32x4_t __rev1_310;  __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \
   int32x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
   int32x4_t __ret_310; \
-  __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \
+  __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \
   __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \
   __ret_310; \
 })
@@ -47152,7 +47132,7 @@
   int16x8_t __s1_311 = __p1_311; \
   int16x4_t __s2_311 = __p2_311; \
   int16x8_t __ret_311; \
-  __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \
+  __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \
   __ret_311; \
 })
 #else
@@ -47164,7 +47144,7 @@
   int16x8_t __rev1_312;  __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \
   int16x8_t __ret_312; \
-  __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \
+  __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \
   __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_312; \
 })
@@ -47176,7 +47156,7 @@
   int32x2_t __s1_313 = __p1_313; \
   int32x2_t __s2_313 = __p2_313; \
   int32x2_t __ret_313; \
-  __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \
+  __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \
   __ret_313; \
 })
 #else
@@ -47188,7 +47168,7 @@
   int32x2_t __rev1_314;  __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \
   int32x2_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \
   int32x2_t __ret_314; \
-  __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \
+  __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \
   __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \
   __ret_314; \
 })
@@ -47200,7 +47180,7 @@
   int16x4_t __s1_315 = __p1_315; \
   int16x4_t __s2_315 = __p2_315; \
   int16x4_t __ret_315; \
-  __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \
+  __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \
   __ret_315; \
 })
 #else
@@ -47212,7 +47192,7 @@
   int16x4_t __rev1_316;  __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \
   int16x4_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \
   int16x4_t __ret_316; \
-  __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \
+  __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \
   __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
   __ret_316; \
 })
@@ -47221,7 +47201,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
   int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
   return __ret;
 }
 #else
@@ -47230,16 +47210,21 @@
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
   int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
   return __ret;
 }
 #else
@@ -47248,16 +47233,21 @@
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
   int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
   return __ret;
 }
 #else
@@ -47266,16 +47256,21 @@
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
   int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
+__ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
   int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
   return __ret;
 }
 #else
@@ -47284,10 +47279,15 @@
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
+__ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
 #endif
 
 #ifdef __LITTLE_ENDIAN__
@@ -47296,7 +47296,7 @@
   int32x4_t __s1_317 = __p1_317; \
   int32x2_t __s2_317 = __p2_317; \
   int32x4_t __ret_317; \
-  __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \
+  __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \
   __ret_317; \
 })
 #else
@@ -47308,7 +47308,7 @@
   int32x4_t __rev1_318;  __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \
   int32x2_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \
   int32x4_t __ret_318; \
-  __ret_318 = __noswap_vqsubq_s32(__rev0_318, __noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \
+  __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \
   __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \
   __ret_318; \
 })
@@ -47320,7 +47320,7 @@
   int16x8_t __s1_319 = __p1_319; \
   int16x4_t __s2_319 = __p2_319; \
   int16x8_t __ret_319; \
-  __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \
+  __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \
   __ret_319; \
 })
 #else
@@ -47332,7 +47332,7 @@
   int16x8_t __rev1_320;  __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
   int16x8_t __ret_320; \
-  __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \
+  __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \
   __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_320; \
 })
@@ -47344,7 +47344,7 @@
   int32x2_t __s1_321 = __p1_321; \
   int32x2_t __s2_321 = __p2_321; \
   int32x2_t __ret_321; \
-  __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \
+  __ret_321 = vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \
   __ret_321; \
 })
 #else
@@ -47356,7 +47356,7 @@
   int32x2_t __rev1_322;  __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \
   int32x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
   int32x2_t __ret_322; \
-  __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \
+  __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \
   __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \
   __ret_322; \
 })
@@ -47368,7 +47368,7 @@
   int16x4_t __s1_323 = __p1_323; \
   int16x4_t __s2_323 = __p2_323; \
   int16x4_t __ret_323; \
-  __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \
+  __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \
   __ret_323; \
 })
 #else
@@ -47380,7 +47380,7 @@
   int16x4_t __rev1_324;  __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \
   int16x4_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \
   int16x4_t __ret_324; \
-  __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \
+  __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \
   __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
   __ret_324; \
 })
@@ -47388,113 +47388,111 @@
 
 #endif
 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
+__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2);
+  return __ret;
+}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int32x4_t __s0_325 = __p0_325; \
-  int32x4_t __s1_325 = __p1_325; \
-  int32x4_t __s2_325 = __p2_325; \
-  int32x4_t __ret_325; \
-  __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \
+#define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int32_t __s0_325 = __p0_325; \
+  int32_t __s1_325 = __p1_325; \
+  int32x2_t __s2_325 = __p2_325; \
+  int32_t __ret_325; \
+  __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \
   __ret_325; \
 })
 #else
-#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int32x4_t __s0_326 = __p0_326; \
-  int32x4_t __s1_326 = __p1_326; \
-  int32x4_t __s2_326 = __p2_326; \
-  int32x4_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \
-  int32x4_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \
-  int32x4_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \
-  int32x4_t __ret_326; \
-  __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \
+#define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int32_t __s0_326 = __p0_326; \
+  int32_t __s1_326 = __p1_326; \
+  int32x2_t __s2_326 = __p2_326; \
+  int32x2_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \
+  int32_t __ret_326; \
+  __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \
   __ret_326; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  int16x8_t __s0_327 = __p0_327; \
-  int16x8_t __s1_327 = __p1_327; \
-  int16x8_t __s2_327 = __p2_327; \
-  int16x8_t __ret_327; \
-  __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \
+#define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  int16_t __s0_327 = __p0_327; \
+  int16_t __s1_327 = __p1_327; \
+  int16x4_t __s2_327 = __p2_327; \
+  int16_t __ret_327; \
+  __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \
   __ret_327; \
 })
 #else
-#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  int16x8_t __s0_328 = __p0_328; \
-  int16x8_t __s1_328 = __p1_328; \
-  int16x8_t __s2_328 = __p2_328; \
-  int16x8_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_328;  __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_328; \
-  __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  int16_t __s0_328 = __p0_328; \
+  int16_t __s1_328 = __p1_328; \
+  int16x4_t __s2_328 = __p2_328; \
+  int16x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  int16_t __ret_328; \
+  __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \
   __ret_328; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x2_t __s1_329 = __p1_329; \
+#define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32_t __s0_329 = __p0_329; \
+  int32_t __s1_329 = __p1_329; \
   int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \
+  int32_t __ret_329; \
+  __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \
   __ret_329; \
 })
 #else
-#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x2_t __s1_330 = __p1_330; \
+#define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32_t __s0_330 = __p0_330; \
+  int32_t __s1_330 = __p1_330; \
   int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x2_t __rev1_330;  __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \
   int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
+  int32_t __ret_330; \
+  __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \
   __ret_330; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int16x4_t __s0_331 = __p0_331; \
-  int16x4_t __s1_331 = __p1_331; \
+#define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int16_t __s0_331 = __p0_331; \
+  int16_t __s1_331 = __p1_331; \
   int16x8_t __s2_331 = __p2_331; \
-  int16x4_t __ret_331; \
-  __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \
+  int16_t __ret_331; \
+  __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \
   __ret_331; \
 })
 #else
-#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int16x4_t __s0_332 = __p0_332; \
-  int16x4_t __s1_332 = __p1_332; \
+#define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int16_t __s0_332 = __p0_332; \
+  int16_t __s1_332 = __p1_332; \
   int16x8_t __s2_332 = __p2_332; \
-  int16x4_t __rev0_332;  __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \
-  int16x4_t __rev1_332;  __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \
   int16x8_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_332; \
-  __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \
-  __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \
+  int16_t __ret_332; \
+  __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \
   __ret_332; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+#define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
   int32x4_t __s0_333 = __p0_333; \
   int32x4_t __s1_333 = __p1_333; \
   int32x4_t __s2_333 = __p2_333; \
   int32x4_t __ret_333; \
-  __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \
+  __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \
   __ret_333; \
 })
 #else
-#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+#define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
   int32x4_t __s0_334 = __p0_334; \
   int32x4_t __s1_334 = __p1_334; \
   int32x4_t __s2_334 = __p2_334; \
@@ -47502,23 +47500,23 @@
   int32x4_t __rev1_334;  __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \
   int32x4_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \
   int32x4_t __ret_334; \
-  __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \
+  __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \
   __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
   __ret_334; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
+#define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
   int16x8_t __s0_335 = __p0_335; \
   int16x8_t __s1_335 = __p1_335; \
   int16x8_t __s2_335 = __p2_335; \
   int16x8_t __ret_335; \
-  __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \
+  __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \
   __ret_335; \
 })
 #else
-#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
+#define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
   int16x8_t __s0_336 = __p0_336; \
   int16x8_t __s1_336 = __p1_336; \
   int16x8_t __s2_336 = __p2_336; \
@@ -47526,23 +47524,23 @@
   int16x8_t __rev1_336;  __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __rev2_336;  __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __ret_336; \
-  __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \
+  __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \
   __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_336; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
+#define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
   int32x2_t __s0_337 = __p0_337; \
   int32x2_t __s1_337 = __p1_337; \
   int32x4_t __s2_337 = __p2_337; \
   int32x2_t __ret_337; \
-  __ret_337 = vqsub_s32(__s0_337, vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \
+  __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \
   __ret_337; \
 })
 #else
-#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
+#define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
   int32x2_t __s0_338 = __p0_338; \
   int32x2_t __s1_338 = __p1_338; \
   int32x4_t __s2_338 = __p2_338; \
@@ -47550,23 +47548,23 @@
   int32x2_t __rev1_338;  __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \
   int32x4_t __rev2_338;  __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \
   int32x2_t __ret_338; \
-  __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \
+  __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \
   __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \
   __ret_338; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
+#define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
   int16x4_t __s0_339 = __p0_339; \
   int16x4_t __s1_339 = __p1_339; \
   int16x8_t __s2_339 = __p2_339; \
   int16x4_t __ret_339; \
-  __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \
+  __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \
   __ret_339; \
 })
 #else
-#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
+#define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
   int16x4_t __s0_340 = __p0_340; \
   int16x4_t __s1_340 = __p1_340; \
   int16x8_t __s2_340 = __p2_340; \
@@ -47574,12 +47572,202 @@
   int16x4_t __rev1_340;  __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \
   int16x8_t __rev2_340;  __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __ret_340; \
-  __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \
+  __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \
   __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \
   __ret_340; \
 })
 #endif
 
+__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
+  int32_t __ret;
+  __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2);
+  return __ret;
+}
+__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
+  int16_t __ret;
+  __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
+  int32_t __s0_341 = __p0_341; \
+  int32_t __s1_341 = __p1_341; \
+  int32x2_t __s2_341 = __p2_341; \
+  int32_t __ret_341; \
+  __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \
+  __ret_341; \
+})
+#else
+#define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
+  int32_t __s0_342 = __p0_342; \
+  int32_t __s1_342 = __p1_342; \
+  int32x2_t __s2_342 = __p2_342; \
+  int32x2_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \
+  int32_t __ret_342; \
+  __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \
+  __ret_342; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
+  int16_t __s0_343 = __p0_343; \
+  int16_t __s1_343 = __p1_343; \
+  int16x4_t __s2_343 = __p2_343; \
+  int16_t __ret_343; \
+  __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \
+  __ret_343; \
+})
+#else
+#define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
+  int16_t __s0_344 = __p0_344; \
+  int16_t __s1_344 = __p1_344; \
+  int16x4_t __s2_344 = __p2_344; \
+  int16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
+  int16_t __ret_344; \
+  __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \
+  __ret_344; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
+  int32_t __s0_345 = __p0_345; \
+  int32_t __s1_345 = __p1_345; \
+  int32x4_t __s2_345 = __p2_345; \
+  int32_t __ret_345; \
+  __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \
+  __ret_345; \
+})
+#else
+#define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
+  int32_t __s0_346 = __p0_346; \
+  int32_t __s1_346 = __p1_346; \
+  int32x4_t __s2_346 = __p2_346; \
+  int32x4_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \
+  int32_t __ret_346; \
+  __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \
+  __ret_346; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
+  int16_t __s0_347 = __p0_347; \
+  int16_t __s1_347 = __p1_347; \
+  int16x8_t __s2_347 = __p2_347; \
+  int16_t __ret_347; \
+  __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \
+  __ret_347; \
+})
+#else
+#define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
+  int16_t __s0_348 = __p0_348; \
+  int16_t __s1_348 = __p1_348; \
+  int16x8_t __s2_348 = __p2_348; \
+  int16x8_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_348; \
+  __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \
+  __ret_348; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
+  int32x4_t __s0_349 = __p0_349; \
+  int32x4_t __s1_349 = __p1_349; \
+  int32x4_t __s2_349 = __p2_349; \
+  int32x4_t __ret_349; \
+  __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \
+  __ret_349; \
+})
+#else
+#define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
+  int32x4_t __s0_350 = __p0_350; \
+  int32x4_t __s1_350 = __p1_350; \
+  int32x4_t __s2_350 = __p2_350; \
+  int32x4_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \
+  int32x4_t __rev1_350;  __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \
+  int32x4_t __rev2_350;  __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \
+  int32x4_t __ret_350; \
+  __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, __noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
+  int16x8_t __s0_351 = __p0_351; \
+  int16x8_t __s1_351 = __p1_351; \
+  int16x8_t __s2_351 = __p2_351; \
+  int16x8_t __ret_351; \
+  __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \
+  __ret_351; \
+})
+#else
+#define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
+  int16x8_t __s0_352 = __p0_352; \
+  int16x8_t __s1_352 = __p1_352; \
+  int16x8_t __s2_352 = __p2_352; \
+  int16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_352;  __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_352; \
+  __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
+  int32x2_t __s0_353 = __p0_353; \
+  int32x2_t __s1_353 = __p1_353; \
+  int32x4_t __s2_353 = __p2_353; \
+  int32x2_t __ret_353; \
+  __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \
+  __ret_353; \
+})
+#else
+#define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
+  int32x2_t __s0_354 = __p0_354; \
+  int32x2_t __s1_354 = __p1_354; \
+  int32x4_t __s2_354 = __p2_354; \
+  int32x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  int32x2_t __rev1_354;  __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \
+  int32x4_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \
+  int32x2_t __ret_354; \
+  __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
+  int16x4_t __s0_355 = __p0_355; \
+  int16x4_t __s1_355 = __p1_355; \
+  int16x8_t __s2_355 = __p2_355; \
+  int16x4_t __ret_355; \
+  __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \
+  __ret_355; \
+})
+#else
+#define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
+  int16x4_t __s0_356 = __p0_356; \
+  int16x4_t __s1_356 = __p1_356; \
+  int16x8_t __s2_356 = __p2_356; \
+  int16x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
+  int16x4_t __rev1_356;  __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \
+  int16x8_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_356; \
+  __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
+  __ret_356; \
+})
+#endif
+
 #endif
 #if defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
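Besides redirecting the _laneq macros through the new builtins, the hunk above adds the scalar Armv8.1 forms — vqrdmlahs_s32, vqrdmlahh_s16, vqrdmlshs_s32, vqrdmlshh_s16 — plus _lane/_laneq variants that pull the multiplier out of a vector lane. A hedged usage sketch (guards and values illustrative; compile with something like -march=armv8.1-a):

    #include <arm_neon.h>

    #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
    int32_t demo(int32_t acc, int32_t b, int32x2_t v) {
      int32_t r0 = vqrdmlahs_s32(acc, b, vget_lane_s32(v, 1)); /* scalar form */
      int32_t r1 = vqrdmlahs_lane_s32(acc, b, v, 1);           /* same via lane macro */
      return vqrdmlshs_s32(r0, r1, b);                         /* multiply-subtract form */
    }
    #endif
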
@@ -49998,895 +50186,895 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
-  poly8x16_t __s0_341 = __p0_341; \
-  poly8x8_t __s2_341 = __p2_341; \
-  poly8x16_t __ret_341; \
-  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
-  __ret_341; \
-})
-#else
-#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
-  poly8x16_t __s0_342 = __p0_342; \
-  poly8x8_t __s2_342 = __p2_342; \
-  poly8x16_t __rev0_342;  __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_342;  __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_342; \
-  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
-  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_342; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
-  poly16x8_t __s0_343 = __p0_343; \
-  poly16x4_t __s2_343 = __p2_343; \
-  poly16x8_t __ret_343; \
-  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
-  poly16x8_t __s0_344 = __p0_344; \
-  poly16x4_t __s2_344 = __p2_344; \
-  poly16x8_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_344;  __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
-  poly16x8_t __ret_344; \
-  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
-  uint8x16_t __s0_345 = __p0_345; \
-  uint8x8_t __s2_345 = __p2_345; \
-  uint8x16_t __ret_345; \
-  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
-  uint8x16_t __s0_346 = __p0_346; \
-  uint8x8_t __s2_346 = __p2_346; \
-  uint8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_346;  __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_346; \
-  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
-  uint32x4_t __s0_347 = __p0_347; \
-  uint32x2_t __s2_347 = __p2_347; \
-  uint32x4_t __ret_347; \
-  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
-  uint32x4_t __s0_348 = __p0_348; \
-  uint32x2_t __s2_348 = __p2_348; \
-  uint32x4_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
-  uint32x2_t __rev2_348;  __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
-  uint32x4_t __ret_348; \
-  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
-  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
-  uint64x2_t __s0_349 = __p0_349; \
-  uint64x1_t __s2_349 = __p2_349; \
-  uint64x2_t __ret_349; \
-  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
-  uint64x2_t __s0_350 = __p0_350; \
-  uint64x1_t __s2_350 = __p2_350; \
-  uint64x2_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
-  uint64x2_t __ret_350; \
-  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
-  uint16x8_t __s0_351 = __p0_351; \
-  uint16x4_t __s2_351 = __p2_351; \
-  uint16x8_t __ret_351; \
-  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
-  uint16x8_t __s0_352 = __p0_352; \
-  uint16x4_t __s2_352 = __p2_352; \
-  uint16x8_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_352;  __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
-  uint16x8_t __ret_352; \
-  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
-  int8x16_t __s0_353 = __p0_353; \
-  int8x8_t __s2_353 = __p2_353; \
-  int8x16_t __ret_353; \
-  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
-  int8x16_t __s0_354 = __p0_354; \
-  int8x8_t __s2_354 = __p2_354; \
-  int8x16_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_354;  __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_354; \
-  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
-  float32x4_t __s0_355 = __p0_355; \
-  float32x2_t __s2_355 = __p2_355; \
-  float32x4_t __ret_355; \
-  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
-  float32x4_t __s0_356 = __p0_356; \
-  float32x2_t __s2_356 = __p2_356; \
-  float32x4_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
-  float32x2_t __rev2_356;  __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
-  float32x4_t __ret_356; \
-  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
-  int32x4_t __s0_357 = __p0_357; \
-  int32x2_t __s2_357 = __p2_357; \
-  int32x4_t __ret_357; \
-  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
+#define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
+  poly8x16_t __s0_357 = __p0_357; \
+  poly8x8_t __s2_357 = __p2_357; \
+  poly8x16_t __ret_357; \
+  __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \
   __ret_357; \
 })
 #else
-#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
-  int32x4_t __s0_358 = __p0_358; \
-  int32x2_t __s2_358 = __p2_358; \
-  int32x4_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
-  int32x2_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
-  int32x4_t __ret_358; \
-  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
+#define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
+  poly8x16_t __s0_358 = __p0_358; \
+  poly8x8_t __s2_358 = __p2_358; \
+  poly8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_358;  __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_358; \
+  __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_358; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
-  int64x2_t __s0_359 = __p0_359; \
-  int64x1_t __s2_359 = __p2_359; \
-  int64x2_t __ret_359; \
-  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
+#define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
+  poly16x8_t __s0_359 = __p0_359; \
+  poly16x4_t __s2_359 = __p2_359; \
+  poly16x8_t __ret_359; \
+  __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \
   __ret_359; \
 })
 #else
-#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
-  int64x2_t __s0_360 = __p0_360; \
-  int64x1_t __s2_360 = __p2_360; \
-  int64x2_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
-  int64x2_t __ret_360; \
-  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
+#define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
+  poly16x8_t __s0_360 = __p0_360; \
+  poly16x4_t __s2_360 = __p2_360; \
+  poly16x8_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_360;  __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \
+  poly16x8_t __ret_360; \
+  __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_360; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
-  int16x8_t __s0_361 = __p0_361; \
-  int16x4_t __s2_361 = __p2_361; \
-  int16x8_t __ret_361; \
-  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
+#define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
+  uint8x16_t __s0_361 = __p0_361; \
+  uint8x8_t __s2_361 = __p2_361; \
+  uint8x16_t __ret_361; \
+  __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \
   __ret_361; \
 })
 #else
-#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
-  int16x8_t __s0_362 = __p0_362; \
-  int16x4_t __s2_362 = __p2_362; \
-  int16x8_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
-  int16x8_t __ret_362; \
-  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
+  uint8x16_t __s0_362 = __p0_362; \
+  uint8x8_t __s2_362 = __p2_362; \
+  uint8x16_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_362;  __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_362; \
+  __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_362; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
-  poly8x8_t __s0_363 = __p0_363; \
-  poly8x8_t __s2_363 = __p2_363; \
-  poly8x8_t __ret_363; \
-  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
+#define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
+  uint32x4_t __s0_363 = __p0_363; \
+  uint32x2_t __s2_363 = __p2_363; \
+  uint32x4_t __ret_363; \
+  __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \
   __ret_363; \
 })
 #else
-#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
-  poly8x8_t __s0_364 = __p0_364; \
-  poly8x8_t __s2_364 = __p2_364; \
-  poly8x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_364; \
-  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
+  uint32x4_t __s0_364 = __p0_364; \
+  uint32x2_t __s2_364 = __p2_364; \
+  uint32x4_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \
+  uint32x2_t __rev2_364;  __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \
+  uint32x4_t __ret_364; \
+  __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \
   __ret_364; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
-  poly16x4_t __s0_365 = __p0_365; \
-  poly16x4_t __s2_365 = __p2_365; \
-  poly16x4_t __ret_365; \
-  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
+#define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
+  uint64x2_t __s0_365 = __p0_365; \
+  uint64x1_t __s2_365 = __p2_365; \
+  uint64x2_t __ret_365; \
+  __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \
   __ret_365; \
 })
 #else
-#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
-  poly16x4_t __s0_366 = __p0_366; \
-  poly16x4_t __s2_366 = __p2_366; \
-  poly16x4_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
-  poly16x4_t __rev2_366;  __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
-  poly16x4_t __ret_366; \
-  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
+#define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
+  uint64x2_t __s0_366 = __p0_366; \
+  uint64x1_t __s2_366 = __p2_366; \
+  uint64x2_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \
+  uint64x2_t __ret_366; \
+  __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \
   __ret_366; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
-  uint8x8_t __s0_367 = __p0_367; \
-  uint8x8_t __s2_367 = __p2_367; \
-  uint8x8_t __ret_367; \
-  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
+#define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
+  uint16x8_t __s0_367 = __p0_367; \
+  uint16x4_t __s2_367 = __p2_367; \
+  uint16x8_t __ret_367; \
+  __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \
   __ret_367; \
 })
 #else
-#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
-  uint8x8_t __s0_368 = __p0_368; \
-  uint8x8_t __s2_368 = __p2_368; \
-  uint8x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_368; \
-  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
+#define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
+  uint16x8_t __s0_368 = __p0_368; \
+  uint16x4_t __s2_368 = __p2_368; \
+  uint16x8_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_368;  __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \
+  uint16x8_t __ret_368; \
+  __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \
   __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_368; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
-  uint32x2_t __s0_369 = __p0_369; \
-  uint32x2_t __s2_369 = __p2_369; \
-  uint32x2_t __ret_369; \
-  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
+#define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
+  int8x16_t __s0_369 = __p0_369; \
+  int8x8_t __s2_369 = __p2_369; \
+  int8x16_t __ret_369; \
+  __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \
   __ret_369; \
 })
 #else
-#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
-  uint32x2_t __s0_370 = __p0_370; \
-  uint32x2_t __s2_370 = __p2_370; \
-  uint32x2_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
-  uint32x2_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
-  uint32x2_t __ret_370; \
-  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
+#define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
+  int8x16_t __s0_370 = __p0_370; \
+  int8x8_t __s2_370 = __p2_370; \
+  int8x16_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_370;  __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_370; \
+  __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_370; \
 })
 #endif
 
-#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
-  uint64x1_t __s0_371 = __p0_371; \
-  uint64x1_t __s2_371 = __p2_371; \
-  uint64x1_t __ret_371; \
-  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
+  float32x4_t __s0_371 = __p0_371; \
+  float32x2_t __s2_371 = __p2_371; \
+  float32x4_t __ret_371; \
+  __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \
   __ret_371; \
 })
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
-  uint16x4_t __s0_372 = __p0_372; \
-  uint16x4_t __s2_372 = __p2_372; \
-  uint16x4_t __ret_372; \
-  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
+#else
+#define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
+  float32x4_t __s0_372 = __p0_372; \
+  float32x2_t __s2_372 = __p2_372; \
+  float32x4_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \
+  float32x2_t __rev2_372;  __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \
+  float32x4_t __ret_372; \
+  __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \
   __ret_372; \
 })
-#else
-#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
-  uint16x4_t __s0_373 = __p0_373; \
-  uint16x4_t __s2_373 = __p2_373; \
-  uint16x4_t __rev0_373;  __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
-  uint16x4_t __rev2_373;  __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
-  uint16x4_t __ret_373; \
-  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
-  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x2_t __s2_373 = __p2_373; \
+  int32x4_t __ret_373; \
+  __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \
   __ret_373; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
-  int8x8_t __s0_374 = __p0_374; \
-  int8x8_t __s2_374 = __p2_374; \
-  int8x8_t __ret_374; \
-  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
+#else
+#define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x2_t __s2_374 = __p2_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x2_t __rev2_374;  __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \
+  int32x4_t __ret_374; \
+  __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
   __ret_374; \
 })
-#else
-#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
-  int8x8_t __s0_375 = __p0_375; \
-  int8x8_t __s2_375 = __p2_375; \
-  int8x8_t __rev0_375;  __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_375;  __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_375; \
-  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
-  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x1_t __s2_375 = __p2_375; \
+  int64x2_t __ret_375; \
+  __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \
   __ret_375; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
-  float32x2_t __s0_376 = __p0_376; \
-  float32x2_t __s2_376 = __p2_376; \
-  float32x2_t __ret_376; \
-  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
+#else
+#define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x1_t __s2_376 = __p2_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  int64x2_t __ret_376; \
+  __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
   __ret_376; \
 })
-#else
-#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
-  float32x2_t __s0_377 = __p0_377; \
-  float32x2_t __s2_377 = __p2_377; \
-  float32x2_t __rev0_377;  __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
-  float32x2_t __rev2_377;  __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
-  float32x2_t __ret_377; \
-  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
-  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x4_t __s2_377 = __p2_377; \
+  int16x8_t __ret_377; \
+  __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \
   __ret_377; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
-  int32x2_t __s0_378 = __p0_378; \
-  int32x2_t __s2_378 = __p2_378; \
-  int32x2_t __ret_378; \
-  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
+#else
+#define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x4_t __s2_378 = __p2_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_378;  __rev2_378 = __builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \
+  int16x8_t __ret_378; \
+  __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_378; \
 })
-#else
-#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
-  int32x2_t __s0_379 = __p0_379; \
-  int32x2_t __s2_379 = __p2_379; \
-  int32x2_t __rev0_379;  __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
-  int32x2_t __rev2_379;  __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
-  int32x2_t __ret_379; \
-  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
-  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
+  poly8x8_t __s0_379 = __p0_379; \
+  poly8x8_t __s2_379 = __p2_379; \
+  poly8x8_t __ret_379; \
+  __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \
   __ret_379; \
 })
+#else
+#define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
+  poly8x8_t __s0_380 = __p0_380; \
+  poly8x8_t __s2_380 = __p2_380; \
+  poly8x8_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_380;  __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_380; \
+  __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
+})
 #endif
 
-#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
-  int64x1_t __s0_380 = __p0_380; \
-  int64x1_t __s2_380 = __p2_380; \
-  int64x1_t __ret_380; \
-  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
-  __ret_380; \
-})
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
-  int16x4_t __s0_381 = __p0_381; \
-  int16x4_t __s2_381 = __p2_381; \
-  int16x4_t __ret_381; \
-  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
+#define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
+  poly16x4_t __s0_381 = __p0_381; \
+  poly16x4_t __s2_381 = __p2_381; \
+  poly16x4_t __ret_381; \
+  __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \
   __ret_381; \
 })
 #else
-#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
-  int16x4_t __s0_382 = __p0_382; \
-  int16x4_t __s2_382 = __p2_382; \
-  int16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  int16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
-  int16x4_t __ret_382; \
-  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
+#define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
+  poly16x4_t __s0_382 = __p0_382; \
+  poly16x4_t __s2_382 = __p2_382; \
+  poly16x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  poly16x4_t __rev2_382;  __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
+  poly16x4_t __ret_382; \
+  __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
   __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
   __ret_382; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
-  poly8x16_t __s0_383 = __p0_383; \
-  poly8x16_t __s2_383 = __p2_383; \
-  poly8x16_t __ret_383; \
-  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
+#define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
+  uint8x8_t __s0_383 = __p0_383; \
+  uint8x8_t __s2_383 = __p2_383; \
+  uint8x8_t __ret_383; \
+  __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \
   __ret_383; \
 })
 #else
-#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
-  poly8x16_t __s0_384 = __p0_384; \
-  poly8x16_t __s2_384 = __p2_384; \
-  poly8x16_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_384; \
-  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
-  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
+  uint8x8_t __s0_384 = __p0_384; \
+  uint8x8_t __s2_384 = __p2_384; \
+  uint8x8_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_384;  __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_384; \
+  __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
+  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_384; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
-  poly16x8_t __s0_385 = __p0_385; \
-  poly16x8_t __s2_385 = __p2_385; \
-  poly16x8_t __ret_385; \
-  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
+#define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
+  uint32x2_t __s0_385 = __p0_385; \
+  uint32x2_t __s2_385 = __p2_385; \
+  uint32x2_t __ret_385; \
+  __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \
   __ret_385; \
 })
 #else
-#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
-  poly16x8_t __s0_386 = __p0_386; \
-  poly16x8_t __s2_386 = __p2_386; \
-  poly16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_386; \
-  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
+  uint32x2_t __s0_386 = __p0_386; \
+  uint32x2_t __s2_386 = __p2_386; \
+  uint32x2_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \
+  uint32x2_t __rev2_386;  __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 1, 0); \
+  uint32x2_t __ret_386; \
+  __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \
   __ret_386; \
 })
 #endif
 
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
-  uint8x16_t __s0_387 = __p0_387; \
-  uint8x16_t __s2_387 = __p2_387; \
-  uint8x16_t __ret_387; \
-  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
+#define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
+  uint64x1_t __s0_387 = __p0_387; \
+  uint64x1_t __s2_387 = __p2_387; \
+  uint64x1_t __ret_387; \
+  __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \
   __ret_387; \
 })
-#else
-#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
-  uint8x16_t __s0_388 = __p0_388; \
-  uint8x16_t __s2_388 = __p2_388; \
-  uint8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_388;  __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_388; \
-  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
+  uint16x4_t __s0_388 = __p0_388; \
+  uint16x4_t __s2_388 = __p2_388; \
+  uint16x4_t __ret_388; \
+  __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \
   __ret_388; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
-  uint32x4_t __s0_389 = __p0_389; \
-  uint32x4_t __s2_389 = __p2_389; \
-  uint32x4_t __ret_389; \
-  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
+#else
+#define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
+  uint16x4_t __s0_389 = __p0_389; \
+  uint16x4_t __s2_389 = __p2_389; \
+  uint16x4_t __rev0_389;  __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \
+  uint16x4_t __rev2_389;  __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \
+  uint16x4_t __ret_389; \
+  __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \
+  __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 2, 1, 0); \
   __ret_389; \
 })
-#else
-#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
-  uint32x4_t __s0_390 = __p0_390; \
-  uint32x4_t __s2_390 = __p2_390; \
-  uint32x4_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
-  uint32x4_t __rev2_390;  __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
-  uint32x4_t __ret_390; \
-  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
-  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
+  int8x8_t __s0_390 = __p0_390; \
+  int8x8_t __s2_390 = __p2_390; \
+  int8x8_t __ret_390; \
+  __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \
   __ret_390; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
-  uint64x2_t __s0_391 = __p0_391; \
-  uint64x2_t __s2_391 = __p2_391; \
-  uint64x2_t __ret_391; \
-  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
+#else
+#define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
+  int8x8_t __s0_391 = __p0_391; \
+  int8x8_t __s2_391 = __p2_391; \
+  int8x8_t __rev0_391;  __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_391;  __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_391; \
+  __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \
+  __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_391; \
 })
-#else
-#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
-  uint64x2_t __s0_392 = __p0_392; \
-  uint64x2_t __s2_392 = __p2_392; \
-  uint64x2_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
-  uint64x2_t __rev2_392;  __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
-  uint64x2_t __ret_392; \
-  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
+  float32x2_t __s0_392 = __p0_392; \
+  float32x2_t __s2_392 = __p2_392; \
+  float32x2_t __ret_392; \
+  __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \
   __ret_392; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
-  uint16x8_t __s0_393 = __p0_393; \
-  uint16x8_t __s2_393 = __p2_393; \
-  uint16x8_t __ret_393; \
-  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
+#else
+#define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
+  float32x2_t __s0_393 = __p0_393; \
+  float32x2_t __s2_393 = __p2_393; \
+  float32x2_t __rev0_393;  __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \
+  float32x2_t __rev2_393;  __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \
+  float32x2_t __ret_393; \
+  __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \
+  __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \
   __ret_393; \
 })
-#else
-#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
-  uint16x8_t __s0_394 = __p0_394; \
-  uint16x8_t __s2_394 = __p2_394; \
-  uint16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_394;  __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_394; \
-  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
+  int32x2_t __s0_394 = __p0_394; \
+  int32x2_t __s2_394 = __p2_394; \
+  int32x2_t __ret_394; \
+  __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \
   __ret_394; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
-  int8x16_t __s0_395 = __p0_395; \
-  int8x16_t __s2_395 = __p2_395; \
-  int8x16_t __ret_395; \
-  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
+#else
+#define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
+  int32x2_t __s0_395 = __p0_395; \
+  int32x2_t __s2_395 = __p2_395; \
+  int32x2_t __rev0_395;  __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \
+  int32x2_t __rev2_395;  __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \
+  int32x2_t __ret_395; \
+  __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \
+  __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \
   __ret_395; \
 })
-#else
-#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
-  int8x16_t __s0_396 = __p0_396; \
-  int8x16_t __s2_396 = __p2_396; \
-  int8x16_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_396;  __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_396; \
-  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_396; \
-})
 #endif
 
+#define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
+  int64x1_t __s0_396 = __p0_396; \
+  int64x1_t __s2_396 = __p2_396; \
+  int64x1_t __ret_396; \
+  __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \
+  __ret_396; \
+})
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
-  float32x4_t __s0_397 = __p0_397; \
-  float32x4_t __s2_397 = __p2_397; \
-  float32x4_t __ret_397; \
-  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
+#define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
+  int16x4_t __s0_397 = __p0_397; \
+  int16x4_t __s2_397 = __p2_397; \
+  int16x4_t __ret_397; \
+  __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \
   __ret_397; \
 })
 #else
-#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
-  float32x4_t __s0_398 = __p0_398; \
-  float32x4_t __s2_398 = __p2_398; \
-  float32x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
-  float32x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
-  float32x4_t __ret_398; \
-  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
+#define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
+  int16x4_t __s0_398 = __p0_398; \
+  int16x4_t __s2_398 = __p2_398; \
+  int16x4_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
+  int16x4_t __rev2_398;  __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
+  int16x4_t __ret_398; \
+  __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \
   __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
   __ret_398; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
-  int32x4_t __s0_399 = __p0_399; \
-  int32x4_t __s2_399 = __p2_399; \
-  int32x4_t __ret_399; \
-  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
+#define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
+  poly8x16_t __s0_399 = __p0_399; \
+  poly8x16_t __s2_399 = __p2_399; \
+  poly8x16_t __ret_399; \
+  __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \
   __ret_399; \
 })
 #else
-#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
-  int32x4_t __s0_400 = __p0_400; \
-  int32x4_t __s2_400 = __p2_400; \
-  int32x4_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
-  int32x4_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
-  int32x4_t __ret_400; \
-  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
+#define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
+  poly8x16_t __s0_400 = __p0_400; \
+  poly8x16_t __s2_400 = __p2_400; \
+  poly8x16_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_400;  __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_400; \
+  __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_400; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  int64x2_t __s0_401 = __p0_401; \
-  int64x2_t __s2_401 = __p2_401; \
-  int64x2_t __ret_401; \
-  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
+#define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  poly16x8_t __s0_401 = __p0_401; \
+  poly16x8_t __s2_401 = __p2_401; \
+  poly16x8_t __ret_401; \
+  __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \
   __ret_401; \
 })
 #else
-#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  int64x2_t __s0_402 = __p0_402; \
-  int64x2_t __s2_402 = __p2_402; \
-  int64x2_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
-  int64x2_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
-  int64x2_t __ret_402; \
-  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
-  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
+#define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  poly16x8_t __s0_402 = __p0_402; \
+  poly16x8_t __s2_402 = __p2_402; \
+  poly16x8_t __rev0_402;  __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_402;  __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_402; \
+  __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \
+  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_402; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  int16x8_t __s0_403 = __p0_403; \
-  int16x8_t __s2_403 = __p2_403; \
-  int16x8_t __ret_403; \
-  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
+#define vcopyq_laneq_u8(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  uint8x16_t __s0_403 = __p0_403; \
+  uint8x16_t __s2_403 = __p2_403; \
+  uint8x16_t __ret_403; \
+  __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \
   __ret_403; \
 })
 #else
-#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  int16x8_t __s0_404 = __p0_404; \
-  int16x8_t __s2_404 = __p2_404; \
-  int16x8_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_404; \
-  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
-  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  uint8x16_t __s0_404 = __p0_404; \
+  uint8x16_t __s2_404 = __p2_404; \
+  uint8x16_t __rev0_404;  __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_404;  __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_404; \
+  __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \
+  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_404; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  poly8x8_t __s0_405 = __p0_405; \
-  poly8x16_t __s2_405 = __p2_405; \
-  poly8x8_t __ret_405; \
-  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
+#define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  uint32x4_t __s0_405 = __p0_405; \
+  uint32x4_t __s2_405 = __p2_405; \
+  uint32x4_t __ret_405; \
+  __ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \
   __ret_405; \
 })
 #else
-#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  poly8x8_t __s0_406 = __p0_406; \
-  poly8x16_t __s2_406 = __p2_406; \
-  poly8x8_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_406; \
-  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
-  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  uint32x4_t __s0_406 = __p0_406; \
+  uint32x4_t __s2_406 = __p2_406; \
+  uint32x4_t __rev0_406;  __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \
+  uint32x4_t __rev2_406;  __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \
+  uint32x4_t __ret_406; \
+  __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \
+  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \
   __ret_406; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  poly16x4_t __s0_407 = __p0_407; \
-  poly16x8_t __s2_407 = __p2_407; \
-  poly16x4_t __ret_407; \
-  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
+#define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  uint64x2_t __s0_407 = __p0_407; \
+  uint64x2_t __s2_407 = __p2_407; \
+  uint64x2_t __ret_407; \
+  __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \
   __ret_407; \
 })
 #else
-#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  poly16x4_t __s0_408 = __p0_408; \
-  poly16x8_t __s2_408 = __p2_408; \
-  poly16x4_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
-  poly16x8_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_408; \
-  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
-  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
+#define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  uint64x2_t __s0_408 = __p0_408; \
+  uint64x2_t __s2_408 = __p2_408; \
+  uint64x2_t __rev0_408;  __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \
+  uint64x2_t __rev2_408;  __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \
+  uint64x2_t __ret_408; \
+  __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \
+  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \
   __ret_408; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  uint8x8_t __s0_409 = __p0_409; \
-  uint8x16_t __s2_409 = __p2_409; \
-  uint8x8_t __ret_409; \
-  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
+#define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  uint16x8_t __s0_409 = __p0_409; \
+  uint16x8_t __s2_409 = __p2_409; \
+  uint16x8_t __ret_409; \
+  __ret_409 = vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \
   __ret_409; \
 })
 #else
-#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  uint8x8_t __s0_410 = __p0_410; \
-  uint8x16_t __s2_410 = __p2_410; \
-  uint8x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_410; \
-  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
+#define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  uint16x8_t __s0_410 = __p0_410; \
+  uint16x8_t __s2_410 = __p2_410; \
+  uint16x8_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_410; \
+  __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \
   __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_410; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  uint32x2_t __s0_411 = __p0_411; \
-  uint32x4_t __s2_411 = __p2_411; \
-  uint32x2_t __ret_411; \
-  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
+#define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  int8x16_t __s0_411 = __p0_411; \
+  int8x16_t __s2_411 = __p2_411; \
+  int8x16_t __ret_411; \
+  __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \
   __ret_411; \
 })
 #else
-#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  uint32x2_t __s0_412 = __p0_412; \
-  uint32x4_t __s2_412 = __p2_412; \
-  uint32x2_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
-  uint32x4_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
-  uint32x2_t __ret_412; \
-  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
-  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
+#define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  int8x16_t __s0_412 = __p0_412; \
+  int8x16_t __s2_412 = __p2_412; \
+  int8x16_t __rev0_412;  __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_412; \
+  __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \
+  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_412; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  uint64x1_t __s0_413 = __p0_413; \
-  uint64x2_t __s2_413 = __p2_413; \
-  uint64x1_t __ret_413; \
-  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
+#define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32x4_t __s0_413 = __p0_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  float32x4_t __ret_413; \
+  __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \
   __ret_413; \
 })
 #else
-#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  uint64x1_t __s0_414 = __p0_414; \
-  uint64x2_t __s2_414 = __p2_414; \
-  uint64x2_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
-  uint64x1_t __ret_414; \
-  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
+#define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32x4_t __s0_414 = __p0_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev0_414;  __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  float32x4_t __ret_414; \
+  __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \
+  __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \
   __ret_414; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  uint16x4_t __s0_415 = __p0_415; \
-  uint16x8_t __s2_415 = __p2_415; \
-  uint16x4_t __ret_415; \
-  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
+#define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  int32x4_t __s0_415 = __p0_415; \
+  int32x4_t __s2_415 = __p2_415; \
+  int32x4_t __ret_415; \
+  __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \
   __ret_415; \
 })
 #else
-#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  uint16x4_t __s0_416 = __p0_416; \
-  uint16x8_t __s2_416 = __p2_416; \
-  uint16x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
-  uint16x8_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_416; \
-  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
+#define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  int32x4_t __s0_416 = __p0_416; \
+  int32x4_t __s2_416 = __p2_416; \
+  int32x4_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
+  int32x4_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \
+  int32x4_t __ret_416; \
+  __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \
   __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
   __ret_416; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  int8x8_t __s0_417 = __p0_417; \
-  int8x16_t __s2_417 = __p2_417; \
-  int8x8_t __ret_417; \
-  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
+#define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  int64x2_t __s0_417 = __p0_417; \
+  int64x2_t __s2_417 = __p2_417; \
+  int64x2_t __ret_417; \
+  __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \
   __ret_417; \
 })
 #else
-#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  int8x8_t __s0_418 = __p0_418; \
-  int8x16_t __s2_418 = __p2_418; \
-  int8x8_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_418; \
-  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  int64x2_t __s0_418 = __p0_418; \
+  int64x2_t __s2_418 = __p2_418; \
+  int64x2_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \
+  int64x2_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \
+  int64x2_t __ret_418; \
+  __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \
   __ret_418; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float32x2_t __s0_419 = __p0_419; \
-  float32x4_t __s2_419 = __p2_419; \
-  float32x2_t __ret_419; \
-  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
+#define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  int16x8_t __s0_419 = __p0_419; \
+  int16x8_t __s2_419 = __p2_419; \
+  int16x8_t __ret_419; \
+  __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \
   __ret_419; \
 })
 #else
-#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float32x2_t __s0_420 = __p0_420; \
-  float32x4_t __s2_420 = __p2_420; \
-  float32x2_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
-  float32x4_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
-  float32x2_t __ret_420; \
-  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
-  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
+#define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  int16x8_t __s0_420 = __p0_420; \
+  int16x8_t __s2_420 = __p2_420; \
+  int16x8_t __rev0_420;  __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_420; \
+  __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \
+  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_420; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  int32x2_t __s0_421 = __p0_421; \
-  int32x4_t __s2_421 = __p2_421; \
-  int32x2_t __ret_421; \
-  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
+#define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  poly8x8_t __s0_421 = __p0_421; \
+  poly8x16_t __s2_421 = __p2_421; \
+  poly8x8_t __ret_421; \
+  __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \
   __ret_421; \
 })
 #else
-#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  int32x2_t __s0_422 = __p0_422; \
-  int32x4_t __s2_422 = __p2_422; \
-  int32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  int32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  int32x2_t __ret_422; \
-  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
+#define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  poly8x8_t __s0_422 = __p0_422; \
+  poly8x16_t __s2_422 = __p2_422; \
+  poly8x8_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_422; \
+  __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_422; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  int64x1_t __s0_423 = __p0_423; \
-  int64x2_t __s2_423 = __p2_423; \
-  int64x1_t __ret_423; \
-  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
+#define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  poly16x4_t __s0_423 = __p0_423; \
+  poly16x8_t __s2_423 = __p2_423; \
+  poly16x4_t __ret_423; \
+  __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \
   __ret_423; \
 })
 #else
-#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  int64x1_t __s0_424 = __p0_424; \
-  int64x2_t __s2_424 = __p2_424; \
-  int64x2_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
-  int64x1_t __ret_424; \
-  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
+#define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  poly16x4_t __s0_424 = __p0_424; \
+  poly16x8_t __s2_424 = __p2_424; \
+  poly16x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  poly16x8_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_424; \
+  __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
   __ret_424; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  int16x4_t __s0_425 = __p0_425; \
-  int16x8_t __s2_425 = __p2_425; \
-  int16x4_t __ret_425; \
-  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
+#define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint8x8_t __s0_425 = __p0_425; \
+  uint8x16_t __s2_425 = __p2_425; \
+  uint8x8_t __ret_425; \
+  __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \
   __ret_425; \
 })
 #else
-#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  int16x4_t __s0_426 = __p0_426; \
-  int16x8_t __s2_426 = __p2_426; \
-  int16x4_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
-  int16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_426; \
-  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
+#define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint8x8_t __s0_426 = __p0_426; \
+  uint8x16_t __s2_426 = __p2_426; \
+  uint8x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_426; \
+  __ret_426 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_426; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  uint32x2_t __s0_427 = __p0_427; \
+  uint32x4_t __s2_427 = __p2_427; \
+  uint32x2_t __ret_427; \
+  __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \
+  __ret_427; \
+})
+#else
+#define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  uint32x2_t __s0_428 = __p0_428; \
+  uint32x4_t __s2_428 = __p2_428; \
+  uint32x2_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \
+  uint32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  uint32x2_t __ret_428; \
+  __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \
+  __ret_428; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  uint64x1_t __s0_429 = __p0_429; \
+  uint64x2_t __s2_429 = __p2_429; \
+  uint64x1_t __ret_429; \
+  __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \
+  __ret_429; \
+})
+#else
+#define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  uint64x1_t __s0_430 = __p0_430; \
+  uint64x2_t __s2_430 = __p2_430; \
+  uint64x2_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \
+  uint64x1_t __ret_430; \
+  __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \
+  __ret_430; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  uint16x4_t __s0_431 = __p0_431; \
+  uint16x8_t __s2_431 = __p2_431; \
+  uint16x4_t __ret_431; \
+  __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \
+  __ret_431; \
+})
+#else
+#define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  uint16x4_t __s0_432 = __p0_432; \
+  uint16x8_t __s2_432 = __p2_432; \
+  uint16x4_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \
+  uint16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_432; \
+  __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \
+  __ret_432; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  int8x8_t __s0_433 = __p0_433; \
+  int8x16_t __s2_433 = __p2_433; \
+  int8x8_t __ret_433; \
+  __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \
+  __ret_433; \
+})
+#else
+#define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  int8x8_t __s0_434 = __p0_434; \
+  int8x16_t __s2_434 = __p2_434; \
+  int8x8_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_434; \
+  __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_434; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  float32x2_t __s0_435 = __p0_435; \
+  float32x4_t __s2_435 = __p2_435; \
+  float32x2_t __ret_435; \
+  __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \
+  __ret_435; \
+})
+#else
+#define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  float32x2_t __s0_436 = __p0_436; \
+  float32x4_t __s2_436 = __p2_436; \
+  float32x2_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \
+  float32x4_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \
+  float32x2_t __ret_436; \
+  __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \
+  __ret_436; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  int32x2_t __s0_437 = __p0_437; \
+  int32x4_t __s2_437 = __p2_437; \
+  int32x2_t __ret_437; \
+  __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \
+  __ret_437; \
+})
+#else
+#define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  int32x2_t __s0_438 = __p0_438; \
+  int32x4_t __s2_438 = __p2_438; \
+  int32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  int32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  int32x2_t __ret_438; \
+  __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int64x1_t __s0_439 = __p0_439; \
+  int64x2_t __s2_439 = __p2_439; \
+  int64x1_t __ret_439; \
+  __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \
+  __ret_439; \
+})
+#else
+#define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int64x1_t __s0_440 = __p0_440; \
+  int64x2_t __s2_440 = __p2_440; \
+  int64x2_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \
+  int64x1_t __ret_440; \
+  __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \
+  __ret_440; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  int16x4_t __ret_441; \
+  __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \
+  __ret_441; \
+})
+#else
+#define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_442; \
+  __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
+})
+#endif
+
 #define vcreate_p64(__p0) __extension__ ({ \
   poly64x1_t __ret; \
   uint64_t __promote = __p0; \
@@ -51625,85 +51813,85 @@
 })
 #endif
 
-#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \
-  poly64x1_t __s0_427 = __p0_427; \
-  poly64x1_t __ret_427; \
-  __ret_427 = splat_lane_p64(__s0_427, __p1_427); \
-  __ret_427; \
+#define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \
+  poly64x1_t __s0_443 = __p0_443; \
+  poly64x1_t __ret_443; \
+  __ret_443 = splat_lane_p64(__s0_443, __p1_443); \
+  __ret_443; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \
-  poly64x1_t __s0_428 = __p0_428; \
-  poly64x2_t __ret_428; \
-  __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \
-  __ret_428; \
+#define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \
+  poly64x1_t __s0_444 = __p0_444; \
+  poly64x2_t __ret_444; \
+  __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \
+  __ret_444; \
 })
 #else
-#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \
-  poly64x1_t __s0_429 = __p0_429; \
-  poly64x2_t __ret_429; \
-  __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \
-  __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \
-  __ret_429; \
+#define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \
+  poly64x1_t __s0_445 = __p0_445; \
+  poly64x2_t __ret_445; \
+  __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \
+  __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \
+  __ret_445; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \
-  float64x1_t __s0_430 = __p0_430; \
-  float64x2_t __ret_430; \
-  __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \
-  __ret_430; \
+#define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \
+  float64x1_t __s0_446 = __p0_446; \
+  float64x2_t __ret_446; \
+  __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \
+  __ret_446; \
 })
 #else
-#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \
-  float64x1_t __s0_431 = __p0_431; \
-  float64x2_t __ret_431; \
-  __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \
-  __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \
-  __ret_431; \
+#define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \
+  float64x1_t __s0_447 = __p0_447; \
+  float64x2_t __ret_447; \
+  __ret_447 = __noswap_splatq_lane_f64(__s0_447, __p1_447); \
+  __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \
+  __ret_447; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \
-  float16x4_t __s0_432 = __p0_432; \
-  float16x8_t __ret_432; \
-  __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \
-  __ret_432; \
+#define vdupq_lane_f16(__p0_448, __p1_448) __extension__ ({ \
+  float16x4_t __s0_448 = __p0_448; \
+  float16x8_t __ret_448; \
+  __ret_448 = splatq_lane_f16(__s0_448, __p1_448); \
+  __ret_448; \
 })
 #else
-#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \
-  float16x4_t __s0_433 = __p0_433; \
-  float16x4_t __rev0_433;  __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \
-  float16x8_t __ret_433; \
-  __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \
-  __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_433; \
+#define vdupq_lane_f16(__p0_449, __p1_449) __extension__ ({ \
+  float16x4_t __s0_449 = __p0_449; \
+  float16x4_t __rev0_449;  __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \
+  float16x8_t __ret_449; \
+  __ret_449 = __noswap_splatq_lane_f16(__rev0_449, __p1_449); \
+  __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_449; \
 })
 #endif
 
-#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \
-  float64x1_t __s0_434 = __p0_434; \
-  float64x1_t __ret_434; \
-  __ret_434 = splat_lane_f64(__s0_434, __p1_434); \
-  __ret_434; \
+#define vdup_lane_f64(__p0_450, __p1_450) __extension__ ({ \
+  float64x1_t __s0_450 = __p0_450; \
+  float64x1_t __ret_450; \
+  __ret_450 = splat_lane_f64(__s0_450, __p1_450); \
+  __ret_450; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \
-  float16x4_t __s0_435 = __p0_435; \
-  float16x4_t __ret_435; \
-  __ret_435 = splat_lane_f16(__s0_435, __p1_435); \
-  __ret_435; \
+#define vdup_lane_f16(__p0_451, __p1_451) __extension__ ({ \
+  float16x4_t __s0_451 = __p0_451; \
+  float16x4_t __ret_451; \
+  __ret_451 = splat_lane_f16(__s0_451, __p1_451); \
+  __ret_451; \
 })
 #else
-#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \
-  float16x4_t __s0_436 = __p0_436; \
-  float16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  float16x4_t __ret_436; \
-  __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
+#define vdup_lane_f16(__p0_452, __p1_452) __extension__ ({ \
+  float16x4_t __s0_452 = __p0_452; \
+  float16x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
+  float16x4_t __ret_452; \
+  __ret_452 = __noswap_splat_lane_f16(__rev0_452, __p1_452); \
+  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
+  __ret_452; \
 })
 #endif
 
@@ -51912,505 +52100,505 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \
-  poly8x16_t __s0_437 = __p0_437; \
-  poly8x8_t __ret_437; \
-  __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \
-  __ret_437; \
-})
-#else
-#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \
-  poly8x16_t __s0_438 = __p0_438; \
-  poly8x16_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_438; \
-  __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \
-  poly64x2_t __s0_439 = __p0_439; \
-  poly64x1_t __ret_439; \
-  __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \
-  __ret_439; \
-})
-#else
-#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \
-  poly64x2_t __s0_440 = __p0_440; \
-  poly64x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  poly64x1_t __ret_440; \
-  __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \
-  poly16x8_t __s0_441 = __p0_441; \
-  poly16x4_t __ret_441; \
-  __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \
-  __ret_441; \
-})
-#else
-#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \
-  poly16x8_t __s0_442 = __p0_442; \
-  poly16x8_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_442; \
-  __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \
-  poly8x16_t __s0_443 = __p0_443; \
-  poly8x16_t __ret_443; \
-  __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \
-  __ret_443; \
-})
-#else
-#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \
-  poly8x16_t __s0_444 = __p0_444; \
-  poly8x16_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_444; \
-  __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \
-  poly64x2_t __s0_445 = __p0_445; \
-  poly64x2_t __ret_445; \
-  __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \
-  __ret_445; \
-})
-#else
-#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \
-  poly64x2_t __s0_446 = __p0_446; \
-  poly64x2_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \
-  poly64x2_t __ret_446; \
-  __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \
-  poly16x8_t __s0_447 = __p0_447; \
-  poly16x8_t __ret_447; \
-  __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \
-  __ret_447; \
-})
-#else
-#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \
-  poly16x8_t __s0_448 = __p0_448; \
-  poly16x8_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_448; \
-  __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \
-  uint8x16_t __s0_449 = __p0_449; \
-  uint8x16_t __ret_449; \
-  __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \
-  __ret_449; \
-})
-#else
-#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \
-  uint8x16_t __s0_450 = __p0_450; \
-  uint8x16_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_450; \
-  __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \
-  uint32x4_t __s0_451 = __p0_451; \
-  uint32x4_t __ret_451; \
-  __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \
-  __ret_451; \
-})
-#else
-#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \
-  uint32x4_t __s0_452 = __p0_452; \
-  uint32x4_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \
-  uint32x4_t __ret_452; \
-  __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \
-  uint64x2_t __s0_453 = __p0_453; \
-  uint64x2_t __ret_453; \
-  __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \
+#define vdup_laneq_p8(__p0_453, __p1_453) __extension__ ({ \
+  poly8x16_t __s0_453 = __p0_453; \
+  poly8x8_t __ret_453; \
+  __ret_453 = splat_laneq_p8(__s0_453, __p1_453); \
   __ret_453; \
 })
 #else
-#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \
-  uint64x2_t __s0_454 = __p0_454; \
-  uint64x2_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \
-  uint64x2_t __ret_454; \
-  __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \
+#define vdup_laneq_p8(__p0_454, __p1_454) __extension__ ({ \
+  poly8x16_t __s0_454 = __p0_454; \
+  poly8x16_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_454; \
+  __ret_454 = __noswap_splat_laneq_p8(__rev0_454, __p1_454); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_454; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \
-  uint16x8_t __s0_455 = __p0_455; \
-  uint16x8_t __ret_455; \
-  __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \
+#define vdup_laneq_p64(__p0_455, __p1_455) __extension__ ({ \
+  poly64x2_t __s0_455 = __p0_455; \
+  poly64x1_t __ret_455; \
+  __ret_455 = splat_laneq_p64(__s0_455, __p1_455); \
   __ret_455; \
 })
 #else
-#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \
-  uint16x8_t __s0_456 = __p0_456; \
-  uint16x8_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_456; \
-  __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_laneq_p64(__p0_456, __p1_456) __extension__ ({ \
+  poly64x2_t __s0_456 = __p0_456; \
+  poly64x2_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
+  poly64x1_t __ret_456; \
+  __ret_456 = __noswap_splat_laneq_p64(__rev0_456, __p1_456); \
   __ret_456; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \
-  int8x16_t __s0_457 = __p0_457; \
-  int8x16_t __ret_457; \
-  __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \
+#define vdup_laneq_p16(__p0_457, __p1_457) __extension__ ({ \
+  poly16x8_t __s0_457 = __p0_457; \
+  poly16x4_t __ret_457; \
+  __ret_457 = splat_laneq_p16(__s0_457, __p1_457); \
   __ret_457; \
 })
 #else
-#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \
-  int8x16_t __s0_458 = __p0_458; \
-  int8x16_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_458; \
-  __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdup_laneq_p16(__p0_458, __p1_458) __extension__ ({ \
+  poly16x8_t __s0_458 = __p0_458; \
+  poly16x8_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_458; \
+  __ret_458 = __noswap_splat_laneq_p16(__rev0_458, __p1_458); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
   __ret_458; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \
-  float64x2_t __s0_459 = __p0_459; \
-  float64x2_t __ret_459; \
-  __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \
+#define vdupq_laneq_p8(__p0_459, __p1_459) __extension__ ({ \
+  poly8x16_t __s0_459 = __p0_459; \
+  poly8x16_t __ret_459; \
+  __ret_459 = splatq_laneq_p8(__s0_459, __p1_459); \
   __ret_459; \
 })
 #else
-#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \
-  float64x2_t __s0_460 = __p0_460; \
-  float64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  float64x2_t __ret_460; \
-  __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
+#define vdupq_laneq_p8(__p0_460, __p1_460) __extension__ ({ \
+  poly8x16_t __s0_460 = __p0_460; \
+  poly8x16_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_460; \
+  __ret_460 = __noswap_splatq_laneq_p8(__rev0_460, __p1_460); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_460; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \
-  float32x4_t __s0_461 = __p0_461; \
-  float32x4_t __ret_461; \
-  __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \
+#define vdupq_laneq_p64(__p0_461, __p1_461) __extension__ ({ \
+  poly64x2_t __s0_461 = __p0_461; \
+  poly64x2_t __ret_461; \
+  __ret_461 = splatq_laneq_p64(__s0_461, __p1_461); \
   __ret_461; \
 })
 #else
-#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \
-  float32x4_t __s0_462 = __p0_462; \
-  float32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  float32x4_t __ret_462; \
-  __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
+#define vdupq_laneq_p64(__p0_462, __p1_462) __extension__ ({ \
+  poly64x2_t __s0_462 = __p0_462; \
+  poly64x2_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 1, 0); \
+  poly64x2_t __ret_462; \
+  __ret_462 = __noswap_splatq_laneq_p64(__rev0_462, __p1_462); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 1, 0); \
   __ret_462; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \
-  float16x8_t __s0_463 = __p0_463; \
-  float16x8_t __ret_463; \
-  __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \
+#define vdupq_laneq_p16(__p0_463, __p1_463) __extension__ ({ \
+  poly16x8_t __s0_463 = __p0_463; \
+  poly16x8_t __ret_463; \
+  __ret_463 = splatq_laneq_p16(__s0_463, __p1_463); \
   __ret_463; \
 })
 #else
-#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \
-  float16x8_t __s0_464 = __p0_464; \
-  float16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_464; \
-  __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \
+#define vdupq_laneq_p16(__p0_464, __p1_464) __extension__ ({ \
+  poly16x8_t __s0_464 = __p0_464; \
+  poly16x8_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_464; \
+  __ret_464 = __noswap_splatq_laneq_p16(__rev0_464, __p1_464); \
   __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_464; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \
+#define vdupq_laneq_u8(__p0_465, __p1_465) __extension__ ({ \
+  uint8x16_t __s0_465 = __p0_465; \
+  uint8x16_t __ret_465; \
+  __ret_465 = splatq_laneq_u8(__s0_465, __p1_465); \
   __ret_465; \
 })
 #else
-#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
+#define vdupq_laneq_u8(__p0_466, __p1_466) __extension__ ({ \
+  uint8x16_t __s0_466 = __p0_466; \
+  uint8x16_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_466; \
+  __ret_466 = __noswap_splatq_laneq_u8(__rev0_466, __p1_466); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_466; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \
-  int64x2_t __s0_467 = __p0_467; \
-  int64x2_t __ret_467; \
-  __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \
+#define vdupq_laneq_u32(__p0_467, __p1_467) __extension__ ({ \
+  uint32x4_t __s0_467 = __p0_467; \
+  uint32x4_t __ret_467; \
+  __ret_467 = splatq_laneq_u32(__s0_467, __p1_467); \
   __ret_467; \
 })
 #else
-#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \
-  int64x2_t __s0_468 = __p0_468; \
-  int64x2_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
-  int64x2_t __ret_468; \
-  __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
+#define vdupq_laneq_u32(__p0_468, __p1_468) __extension__ ({ \
+  uint32x4_t __s0_468 = __p0_468; \
+  uint32x4_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \
+  uint32x4_t __ret_468; \
+  __ret_468 = __noswap_splatq_laneq_u32(__rev0_468, __p1_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \
   __ret_468; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \
-  int16x8_t __s0_469 = __p0_469; \
-  int16x8_t __ret_469; \
-  __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \
+#define vdupq_laneq_u64(__p0_469, __p1_469) __extension__ ({ \
+  uint64x2_t __s0_469 = __p0_469; \
+  uint64x2_t __ret_469; \
+  __ret_469 = splatq_laneq_u64(__s0_469, __p1_469); \
   __ret_469; \
 })
 #else
-#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \
-  int16x8_t __s0_470 = __p0_470; \
-  int16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_470; \
-  __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+#define vdupq_laneq_u64(__p0_470, __p1_470) __extension__ ({ \
+  uint64x2_t __s0_470 = __p0_470; \
+  uint64x2_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 1, 0); \
+  uint64x2_t __ret_470; \
+  __ret_470 = __noswap_splatq_laneq_u64(__rev0_470, __p1_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 1, 0); \
   __ret_470; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \
-  uint8x16_t __s0_471 = __p0_471; \
-  uint8x8_t __ret_471; \
-  __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \
+#define vdupq_laneq_u16(__p0_471, __p1_471) __extension__ ({ \
+  uint16x8_t __s0_471 = __p0_471; \
+  uint16x8_t __ret_471; \
+  __ret_471 = splatq_laneq_u16(__s0_471, __p1_471); \
   __ret_471; \
 })
 #else
-#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \
-  uint8x16_t __s0_472 = __p0_472; \
-  uint8x16_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_472; \
-  __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \
+#define vdupq_laneq_u16(__p0_472, __p1_472) __extension__ ({ \
+  uint16x8_t __s0_472 = __p0_472; \
+  uint16x8_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_472; \
+  __ret_472 = __noswap_splatq_laneq_u16(__rev0_472, __p1_472); \
   __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_472; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \
-  uint32x4_t __s0_473 = __p0_473; \
-  uint32x2_t __ret_473; \
-  __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \
+#define vdupq_laneq_s8(__p0_473, __p1_473) __extension__ ({ \
+  int8x16_t __s0_473 = __p0_473; \
+  int8x16_t __ret_473; \
+  __ret_473 = splatq_laneq_s8(__s0_473, __p1_473); \
   __ret_473; \
 })
 #else
-#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \
-  uint32x4_t __s0_474 = __p0_474; \
-  uint32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  uint32x2_t __ret_474; \
-  __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \
+#define vdupq_laneq_s8(__p0_474, __p1_474) __extension__ ({ \
+  int8x16_t __s0_474 = __p0_474; \
+  int8x16_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_474; \
+  __ret_474 = __noswap_splatq_laneq_s8(__rev0_474, __p1_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_474; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ \
-  uint64x2_t __s0_475 = __p0_475; \
-  uint64x1_t __ret_475; \
-  __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \
+#define vdupq_laneq_f64(__p0_475, __p1_475) __extension__ ({ \
+  float64x2_t __s0_475 = __p0_475; \
+  float64x2_t __ret_475; \
+  __ret_475 = splatq_laneq_f64(__s0_475, __p1_475); \
   __ret_475; \
 })
 #else
-#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \
-  uint64x2_t __s0_476 = __p0_476; \
-  uint64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
-  uint64x1_t __ret_476; \
-  __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \
+#define vdupq_laneq_f64(__p0_476, __p1_476) __extension__ ({ \
+  float64x2_t __s0_476 = __p0_476; \
+  float64x2_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \
+  float64x2_t __ret_476; \
+  __ret_476 = __noswap_splatq_laneq_f64(__rev0_476, __p1_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 1, 0); \
   __ret_476; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \
-  uint16x8_t __s0_477 = __p0_477; \
-  uint16x4_t __ret_477; \
-  __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \
+#define vdupq_laneq_f32(__p0_477, __p1_477) __extension__ ({ \
+  float32x4_t __s0_477 = __p0_477; \
+  float32x4_t __ret_477; \
+  __ret_477 = splatq_laneq_f32(__s0_477, __p1_477); \
   __ret_477; \
 })
 #else
-#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \
-  uint16x8_t __s0_478 = __p0_478; \
-  uint16x8_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_478; \
-  __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \
+#define vdupq_laneq_f32(__p0_478, __p1_478) __extension__ ({ \
+  float32x4_t __s0_478 = __p0_478; \
+  float32x4_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \
+  float32x4_t __ret_478; \
+  __ret_478 = __noswap_splatq_laneq_f32(__rev0_478, __p1_478); \
   __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
   __ret_478; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \
-  int8x16_t __s0_479 = __p0_479; \
-  int8x8_t __ret_479; \
-  __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \
+#define vdupq_laneq_f16(__p0_479, __p1_479) __extension__ ({ \
+  float16x8_t __s0_479 = __p0_479; \
+  float16x8_t __ret_479; \
+  __ret_479 = splatq_laneq_f16(__s0_479, __p1_479); \
   __ret_479; \
 })
 #else
-#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \
-  int8x16_t __s0_480 = __p0_480; \
-  int8x16_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_480; \
-  __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \
+#define vdupq_laneq_f16(__p0_480, __p1_480) __extension__ ({ \
+  float16x8_t __s0_480 = __p0_480; \
+  float16x8_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_480; \
+  __ret_480 = __noswap_splatq_laneq_f16(__rev0_480, __p1_480); \
   __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_480; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \
-  float64x2_t __s0_481 = __p0_481; \
-  float64x1_t __ret_481; \
-  __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \
+#define vdupq_laneq_s32(__p0_481, __p1_481) __extension__ ({ \
+  int32x4_t __s0_481 = __p0_481; \
+  int32x4_t __ret_481; \
+  __ret_481 = splatq_laneq_s32(__s0_481, __p1_481); \
   __ret_481; \
 })
 #else
-#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \
-  float64x2_t __s0_482 = __p0_482; \
-  float64x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float64x1_t __ret_482; \
-  __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \
+#define vdupq_laneq_s32(__p0_482, __p1_482) __extension__ ({ \
+  int32x4_t __s0_482 = __p0_482; \
+  int32x4_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 3, 2, 1, 0); \
+  int32x4_t __ret_482; \
+  __ret_482 = __noswap_splatq_laneq_s32(__rev0_482, __p1_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 3, 2, 1, 0); \
   __ret_482; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \
-  float32x4_t __s0_483 = __p0_483; \
-  float32x2_t __ret_483; \
-  __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \
+#define vdupq_laneq_s64(__p0_483, __p1_483) __extension__ ({ \
+  int64x2_t __s0_483 = __p0_483; \
+  int64x2_t __ret_483; \
+  __ret_483 = splatq_laneq_s64(__s0_483, __p1_483); \
   __ret_483; \
 })
 #else
-#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \
-  float32x4_t __s0_484 = __p0_484; \
-  float32x4_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
-  float32x2_t __ret_484; \
-  __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \
+#define vdupq_laneq_s64(__p0_484, __p1_484) __extension__ ({ \
+  int64x2_t __s0_484 = __p0_484; \
+  int64x2_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \
+  int64x2_t __ret_484; \
+  __ret_484 = __noswap_splatq_laneq_s64(__rev0_484, __p1_484); \
   __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
   __ret_484; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \
-  float16x8_t __s0_485 = __p0_485; \
-  float16x4_t __ret_485; \
-  __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \
+#define vdupq_laneq_s16(__p0_485, __p1_485) __extension__ ({ \
+  int16x8_t __s0_485 = __p0_485; \
+  int16x8_t __ret_485; \
+  __ret_485 = splatq_laneq_s16(__s0_485, __p1_485); \
   __ret_485; \
 })
 #else
-#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \
-  float16x8_t __s0_486 = __p0_486; \
-  float16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_486; \
-  __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
+#define vdupq_laneq_s16(__p0_486, __p1_486) __extension__ ({ \
+  int16x8_t __s0_486 = __p0_486; \
+  int16x8_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_486; \
+  __ret_486 = __noswap_splatq_laneq_s16(__rev0_486, __p1_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_486; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \
-  int32x4_t __s0_487 = __p0_487; \
-  int32x2_t __ret_487; \
-  __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \
+#define vdup_laneq_u8(__p0_487, __p1_487) __extension__ ({ \
+  uint8x16_t __s0_487 = __p0_487; \
+  uint8x8_t __ret_487; \
+  __ret_487 = splat_laneq_u8(__s0_487, __p1_487); \
   __ret_487; \
 })
 #else
-#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \
-  int32x4_t __s0_488 = __p0_488; \
-  int32x4_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \
-  int32x2_t __ret_488; \
-  __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
+#define vdup_laneq_u8(__p0_488, __p1_488) __extension__ ({ \
+  uint8x16_t __s0_488 = __p0_488; \
+  uint8x16_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_488; \
+  __ret_488 = __noswap_splat_laneq_u8(__rev0_488, __p1_488); \
+  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_488; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \
-  int64x2_t __s0_489 = __p0_489; \
-  int64x1_t __ret_489; \
-  __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \
+#define vdup_laneq_u32(__p0_489, __p1_489) __extension__ ({ \
+  uint32x4_t __s0_489 = __p0_489; \
+  uint32x2_t __ret_489; \
+  __ret_489 = splat_laneq_u32(__s0_489, __p1_489); \
   __ret_489; \
 })
 #else
-#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \
-  int64x2_t __s0_490 = __p0_490; \
-  int64x2_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \
-  int64x1_t __ret_490; \
-  __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \
+#define vdup_laneq_u32(__p0_490, __p1_490) __extension__ ({ \
+  uint32x4_t __s0_490 = __p0_490; \
+  uint32x4_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
+  uint32x2_t __ret_490; \
+  __ret_490 = __noswap_splat_laneq_u32(__rev0_490, __p1_490); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 1, 0); \
   __ret_490; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \
-  int16x8_t __s0_491 = __p0_491; \
-  int16x4_t __ret_491; \
-  __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \
+#define vdup_laneq_u64(__p0_491, __p1_491) __extension__ ({ \
+  uint64x2_t __s0_491 = __p0_491; \
+  uint64x1_t __ret_491; \
+  __ret_491 = splat_laneq_u64(__s0_491, __p1_491); \
   __ret_491; \
 })
 #else
-#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \
-  int16x8_t __s0_492 = __p0_492; \
-  int16x8_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_492; \
-  __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \
+#define vdup_laneq_u64(__p0_492, __p1_492) __extension__ ({ \
+  uint64x2_t __s0_492 = __p0_492; \
+  uint64x2_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
+  uint64x1_t __ret_492; \
+  __ret_492 = __noswap_splat_laneq_u64(__rev0_492, __p1_492); \
   __ret_492; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_u16(__p0_493, __p1_493) __extension__ ({ \
+  uint16x8_t __s0_493 = __p0_493; \
+  uint16x4_t __ret_493; \
+  __ret_493 = splat_laneq_u16(__s0_493, __p1_493); \
+  __ret_493; \
+})
+#else
+#define vdup_laneq_u16(__p0_494, __p1_494) __extension__ ({ \
+  uint16x8_t __s0_494 = __p0_494; \
+  uint16x8_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_494; \
+  __ret_494 = __noswap_splat_laneq_u16(__rev0_494, __p1_494); \
+  __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
+  __ret_494; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s8(__p0_495, __p1_495) __extension__ ({ \
+  int8x16_t __s0_495 = __p0_495; \
+  int8x8_t __ret_495; \
+  __ret_495 = splat_laneq_s8(__s0_495, __p1_495); \
+  __ret_495; \
+})
+#else
+#define vdup_laneq_s8(__p0_496, __p1_496) __extension__ ({ \
+  int8x16_t __s0_496 = __p0_496; \
+  int8x16_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_496; \
+  __ret_496 = __noswap_splat_laneq_s8(__rev0_496, __p1_496); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_496; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f64(__p0_497, __p1_497) __extension__ ({ \
+  float64x2_t __s0_497 = __p0_497; \
+  float64x1_t __ret_497; \
+  __ret_497 = splat_laneq_f64(__s0_497, __p1_497); \
+  __ret_497; \
+})
+#else
+#define vdup_laneq_f64(__p0_498, __p1_498) __extension__ ({ \
+  float64x2_t __s0_498 = __p0_498; \
+  float64x2_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 1, 0); \
+  float64x1_t __ret_498; \
+  __ret_498 = __noswap_splat_laneq_f64(__rev0_498, __p1_498); \
+  __ret_498; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f32(__p0_499, __p1_499) __extension__ ({ \
+  float32x4_t __s0_499 = __p0_499; \
+  float32x2_t __ret_499; \
+  __ret_499 = splat_laneq_f32(__s0_499, __p1_499); \
+  __ret_499; \
+})
+#else
+#define vdup_laneq_f32(__p0_500, __p1_500) __extension__ ({ \
+  float32x4_t __s0_500 = __p0_500; \
+  float32x4_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \
+  float32x2_t __ret_500; \
+  __ret_500 = __noswap_splat_laneq_f32(__rev0_500, __p1_500); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_f16(__p0_501, __p1_501) __extension__ ({ \
+  float16x8_t __s0_501 = __p0_501; \
+  float16x4_t __ret_501; \
+  __ret_501 = splat_laneq_f16(__s0_501, __p1_501); \
+  __ret_501; \
+})
+#else
+#define vdup_laneq_f16(__p0_502, __p1_502) __extension__ ({ \
+  float16x8_t __s0_502 = __p0_502; \
+  float16x8_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_502; \
+  __ret_502 = __noswap_splat_laneq_f16(__rev0_502, __p1_502); \
+  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
+  __ret_502; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s32(__p0_503, __p1_503) __extension__ ({ \
+  int32x4_t __s0_503 = __p0_503; \
+  int32x2_t __ret_503; \
+  __ret_503 = splat_laneq_s32(__s0_503, __p1_503); \
+  __ret_503; \
+})
+#else
+#define vdup_laneq_s32(__p0_504, __p1_504) __extension__ ({ \
+  int32x4_t __s0_504 = __p0_504; \
+  int32x4_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 3, 2, 1, 0); \
+  int32x2_t __ret_504; \
+  __ret_504 = __noswap_splat_laneq_s32(__rev0_504, __p1_504); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
+  __ret_504; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s64(__p0_505, __p1_505) __extension__ ({ \
+  int64x2_t __s0_505 = __p0_505; \
+  int64x1_t __ret_505; \
+  __ret_505 = splat_laneq_s64(__s0_505, __p1_505); \
+  __ret_505; \
+})
+#else
+#define vdup_laneq_s64(__p0_506, __p1_506) __extension__ ({ \
+  int64x2_t __s0_506 = __p0_506; \
+  int64x2_t __rev0_506;  __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 1, 0); \
+  int64x1_t __ret_506; \
+  __ret_506 = __noswap_splat_laneq_s64(__rev0_506, __p1_506); \
+  __ret_506; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_s16(__p0_507, __p1_507) __extension__ ({ \
+  int16x8_t __s0_507 = __p0_507; \
+  int16x4_t __ret_507; \
+  __ret_507 = splat_laneq_s16(__s0_507, __p1_507); \
+  __ret_507; \
+})
+#else
+#define vdup_laneq_s16(__p0_508, __p1_508) __extension__ ({ \
+  int16x8_t __s0_508 = __p0_508; \
+  int16x8_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_508; \
+  __ret_508 = __noswap_splat_laneq_s16(__rev0_508, __p1_508); \
+  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 3, 2, 1, 0); \
+  __ret_508; \
+})
+#endif
+
 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
   poly64x1_t __ret;
   __ret = (poly64x1_t) {__p0};
@@ -52903,246 +53091,246 @@
   __ret = vfma_f64(__p0, -__p1, __p2);
   return __ret;
 }
-#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  float64_t __s0_493 = __p0_493; \
-  float64_t __s1_493 = __p1_493; \
-  float64x1_t __s2_493 = __p2_493; \
-  float64_t __ret_493; \
-  __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \
-  __ret_493; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  float32_t __s0_494 = __p0_494; \
-  float32_t __s1_494 = __p1_494; \
-  float32x2_t __s2_494 = __p2_494; \
-  float32_t __ret_494; \
-  __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \
-  __ret_494; \
-})
-#else
-#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  float32_t __s0_495 = __p0_495; \
-  float32_t __s1_495 = __p1_495; \
-  float32x2_t __s2_495 = __p2_495; \
-  float32x2_t __rev2_495;  __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \
-  float32_t __ret_495; \
-  __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \
-  __ret_495; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  float64x2_t __s0_496 = __p0_496; \
-  float64x2_t __s1_496 = __p1_496; \
-  float64x1_t __s2_496 = __p2_496; \
-  float64x2_t __ret_496; \
-  __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \
-  __ret_496; \
-})
-#else
-#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  float64x2_t __s0_497 = __p0_497; \
-  float64x2_t __s1_497 = __p1_497; \
-  float64x1_t __s2_497 = __p2_497; \
-  float64x2_t __rev0_497;  __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \
-  float64x2_t __rev1_497;  __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \
-  float64x2_t __ret_497; \
-  __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \
-  __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \
-  __ret_497; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  float32x4_t __s0_498 = __p0_498; \
-  float32x4_t __s1_498 = __p1_498; \
-  float32x2_t __s2_498 = __p2_498; \
-  float32x4_t __ret_498; \
-  __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \
-  __ret_498; \
-})
-#else
-#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  float32x4_t __s0_499 = __p0_499; \
-  float32x4_t __s1_499 = __p1_499; \
-  float32x2_t __s2_499 = __p2_499; \
-  float32x4_t __rev0_499;  __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
-  float32x4_t __rev1_499;  __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
-  float32x2_t __rev2_499;  __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \
-  float32x4_t __ret_499; \
-  __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \
-  __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
-  __ret_499; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  float64x1_t __s0_500 = __p0_500; \
-  float64x1_t __s1_500 = __p1_500; \
-  float64x1_t __s2_500 = __p2_500; \
-  float64x1_t __ret_500; \
-  __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \
-  __ret_500; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  float32x2_t __s0_501 = __p0_501; \
-  float32x2_t __s1_501 = __p1_501; \
-  float32x2_t __s2_501 = __p2_501; \
-  float32x2_t __ret_501; \
-  __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \
-  __ret_501; \
-})
-#else
-#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  float32x2_t __s0_502 = __p0_502; \
-  float32x2_t __s1_502 = __p1_502; \
-  float32x2_t __s2_502 = __p2_502; \
-  float32x2_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \
-  float32x2_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \
-  float32x2_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \
-  float32x2_t __ret_502; \
-  __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  float64_t __s0_503 = __p0_503; \
-  float64_t __s1_503 = __p1_503; \
-  float64x2_t __s2_503 = __p2_503; \
-  float64_t __ret_503; \
-  __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \
-  __ret_503; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  float64_t __s0_504 = __p0_504; \
-  float64_t __s1_504 = __p1_504; \
-  float64x2_t __s2_504 = __p2_504; \
-  float64x2_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \
-  float64_t __ret_504; \
-  __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  float32_t __s0_505 = __p0_505; \
-  float32_t __s1_505 = __p1_505; \
-  float32x4_t __s2_505 = __p2_505; \
-  float32_t __ret_505; \
-  __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \
-  __ret_505; \
-})
-#else
-#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  float32_t __s0_506 = __p0_506; \
-  float32_t __s1_506 = __p1_506; \
-  float32x4_t __s2_506 = __p2_506; \
-  float32x4_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \
-  float32_t __ret_506; \
-  __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  float64x2_t __s0_507 = __p0_507; \
-  float64x2_t __s1_507 = __p1_507; \
-  float64x2_t __s2_507 = __p2_507; \
-  float64x2_t __ret_507; \
-  __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \
-  __ret_507; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  float64x2_t __s0_508 = __p0_508; \
-  float64x2_t __s1_508 = __p1_508; \
-  float64x2_t __s2_508 = __p2_508; \
-  float64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  float64x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  float64x2_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \
-  float64x2_t __ret_508; \
-  __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  float32x4_t __s0_509 = __p0_509; \
-  float32x4_t __s1_509 = __p1_509; \
-  float32x4_t __s2_509 = __p2_509; \
-  float32x4_t __ret_509; \
-  __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \
+#define vfmsd_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  float64_t __s0_509 = __p0_509; \
+  float64_t __s1_509 = __p1_509; \
+  float64x1_t __s2_509 = __p2_509; \
+  float64_t __ret_509; \
+  __ret_509 = vfmad_lane_f64(__s0_509, -__s1_509, __s2_509, __p3_509); \
   __ret_509; \
 })
-#else
-#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  float32x4_t __s0_510 = __p0_510; \
-  float32x4_t __s1_510 = __p1_510; \
-  float32x4_t __s2_510 = __p2_510; \
-  float32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  float32x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  float32x4_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \
-  float32x4_t __ret_510; \
-  __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  float32_t __s0_510 = __p0_510; \
+  float32_t __s1_510 = __p1_510; \
+  float32x2_t __s2_510 = __p2_510; \
+  float32_t __ret_510; \
+  __ret_510 = vfmas_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \
   __ret_510; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
-  float64x1_t __s0_511 = __p0_511; \
-  float64x1_t __s1_511 = __p1_511; \
-  float64x2_t __s2_511 = __p2_511; \
-  float64x1_t __ret_511; \
-  __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \
+#else
+#define vfmss_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
+  float32_t __s0_511 = __p0_511; \
+  float32_t __s1_511 = __p1_511; \
+  float32x2_t __s2_511 = __p2_511; \
+  float32x2_t __rev2_511;  __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \
+  float32_t __ret_511; \
+  __ret_511 = __noswap_vfmas_lane_f32(__s0_511, -__s1_511, __rev2_511, __p3_511); \
   __ret_511; \
 })
-#else
-#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
-  float64x1_t __s0_512 = __p0_512; \
-  float64x1_t __s1_512 = __p1_512; \
-  float64x2_t __s2_512 = __p2_512; \
-  float64x2_t __rev2_512;  __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \
-  float64x1_t __ret_512; \
-  __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
+  float64x2_t __s0_512 = __p0_512; \
+  float64x2_t __s1_512 = __p1_512; \
+  float64x1_t __s2_512 = __p2_512; \
+  float64x2_t __ret_512; \
+  __ret_512 = vfmaq_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \
   __ret_512; \
 })
+#else
+#define vfmsq_lane_f64(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
+  float64x2_t __s0_513 = __p0_513; \
+  float64x2_t __s1_513 = __p1_513; \
+  float64x1_t __s2_513 = __p2_513; \
+  float64x2_t __rev0_513;  __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \
+  float64x2_t __rev1_513;  __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 1, 0); \
+  float64x2_t __ret_513; \
+  __ret_513 = __noswap_vfmaq_lane_f64(__rev0_513, -__rev1_513, __s2_513, __p3_513); \
+  __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \
+  __ret_513; \
+})
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
-  float32x2_t __s0_513 = __p0_513; \
-  float32x2_t __s1_513 = __p1_513; \
-  float32x4_t __s2_513 = __p2_513; \
-  float32x2_t __ret_513; \
-  __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \
-  __ret_513; \
+#define vfmsq_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
+  float32x4_t __s0_514 = __p0_514; \
+  float32x4_t __s1_514 = __p1_514; \
+  float32x2_t __s2_514 = __p2_514; \
+  float32x4_t __ret_514; \
+  __ret_514 = vfmaq_lane_f32(__s0_514, -__s1_514, __s2_514, __p3_514); \
+  __ret_514; \
 })
 #else
-#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
-  float32x2_t __s0_514 = __p0_514; \
-  float32x2_t __s1_514 = __p1_514; \
-  float32x4_t __s2_514 = __p2_514; \
-  float32x2_t __rev0_514;  __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \
-  float32x2_t __rev1_514;  __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \
-  float32x4_t __rev2_514;  __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \
-  float32x2_t __ret_514; \
-  __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \
-  __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \
-  __ret_514; \
+#define vfmsq_lane_f32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
+  float32x4_t __s0_515 = __p0_515; \
+  float32x4_t __s1_515 = __p1_515; \
+  float32x2_t __s2_515 = __p2_515; \
+  float32x4_t __rev0_515;  __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \
+  float32x4_t __rev1_515;  __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 3, 2, 1, 0); \
+  float32x2_t __rev2_515;  __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 1, 0); \
+  float32x4_t __ret_515; \
+  __ret_515 = __noswap_vfmaq_lane_f32(__rev0_515, -__rev1_515, __rev2_515, __p3_515); \
+  __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \
+  __ret_515; \
+})
+#endif
+
+#define vfms_lane_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
+  float64x1_t __s0_516 = __p0_516; \
+  float64x1_t __s1_516 = __p1_516; \
+  float64x1_t __s2_516 = __p2_516; \
+  float64x1_t __ret_516; \
+  __ret_516 = vfma_lane_f64(__s0_516, -__s1_516, __s2_516, __p3_516); \
+  __ret_516; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vfms_lane_f32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
+  float32x2_t __s0_517 = __p0_517; \
+  float32x2_t __s1_517 = __p1_517; \
+  float32x2_t __s2_517 = __p2_517; \
+  float32x2_t __ret_517; \
+  __ret_517 = vfma_lane_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \
+  __ret_517; \
+})
+#else
+#define vfms_lane_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
+  float32x2_t __s0_518 = __p0_518; \
+  float32x2_t __s1_518 = __p1_518; \
+  float32x2_t __s2_518 = __p2_518; \
+  float32x2_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 1, 0); \
+  float32x2_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 1, 0); \
+  float32x2_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 1, 0); \
+  float32x2_t __ret_518; \
+  __ret_518 = __noswap_vfma_lane_f32(__rev0_518, -__rev1_518, __rev2_518, __p3_518); \
+  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 1, 0); \
+  __ret_518; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsd_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
+  float64_t __s0_519 = __p0_519; \
+  float64_t __s1_519 = __p1_519; \
+  float64x2_t __s2_519 = __p2_519; \
+  float64_t __ret_519; \
+  __ret_519 = vfmad_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \
+  __ret_519; \
+})
+#else
+#define vfmsd_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
+  float64_t __s0_520 = __p0_520; \
+  float64_t __s1_520 = __p1_520; \
+  float64x2_t __s2_520 = __p2_520; \
+  float64x2_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \
+  float64_t __ret_520; \
+  __ret_520 = __noswap_vfmad_laneq_f64(__s0_520, -__s1_520, __rev2_520, __p3_520); \
+  __ret_520; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmss_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
+  float32_t __s0_521 = __p0_521; \
+  float32_t __s1_521 = __p1_521; \
+  float32x4_t __s2_521 = __p2_521; \
+  float32_t __ret_521; \
+  __ret_521 = vfmas_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \
+  __ret_521; \
+})
+#else
+#define vfmss_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
+  float32_t __s0_522 = __p0_522; \
+  float32_t __s1_522 = __p1_522; \
+  float32x4_t __s2_522 = __p2_522; \
+  float32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
+  float32_t __ret_522; \
+  __ret_522 = __noswap_vfmas_laneq_f32(__s0_522, -__s1_522, __rev2_522, __p3_522); \
+  __ret_522; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
+  float64x2_t __s0_523 = __p0_523; \
+  float64x2_t __s1_523 = __p1_523; \
+  float64x2_t __s2_523 = __p2_523; \
+  float64x2_t __ret_523; \
+  __ret_523 = vfmaq_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \
+  __ret_523; \
+})
+#else
+#define vfmsq_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
+  float64x2_t __s0_524 = __p0_524; \
+  float64x2_t __s1_524 = __p1_524; \
+  float64x2_t __s2_524 = __p2_524; \
+  float64x2_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 1, 0); \
+  float64x2_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 1, 0); \
+  float64x2_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \
+  float64x2_t __ret_524; \
+  __ret_524 = __noswap_vfmaq_laneq_f64(__rev0_524, -__rev1_524, __rev2_524, __p3_524); \
+  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0); \
+  __ret_524; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfmsq_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
+  float32x4_t __s0_525 = __p0_525; \
+  float32x4_t __s1_525 = __p1_525; \
+  float32x4_t __s2_525 = __p2_525; \
+  float32x4_t __ret_525; \
+  __ret_525 = vfmaq_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \
+  __ret_525; \
+})
+#else
+#define vfmsq_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
+  float32x4_t __s0_526 = __p0_526; \
+  float32x4_t __s1_526 = __p1_526; \
+  float32x4_t __s2_526 = __p2_526; \
+  float32x4_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 3, 2, 1, 0); \
+  float32x4_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 3, 2, 1, 0); \
+  float32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
+  float32x4_t __ret_526; \
+  __ret_526 = __noswap_vfmaq_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \
+  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 3, 2, 1, 0); \
+  __ret_526; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f64(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
+  float64x1_t __s0_527 = __p0_527; \
+  float64x1_t __s1_527 = __p1_527; \
+  float64x2_t __s2_527 = __p2_527; \
+  float64x1_t __ret_527; \
+  __ret_527 = vfma_laneq_f64(__s0_527, -__s1_527, __s2_527, __p3_527); \
+  __ret_527; \
+})
+#else
+#define vfms_laneq_f64(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
+  float64x1_t __s0_528 = __p0_528; \
+  float64x1_t __s1_528 = __p1_528; \
+  float64x2_t __s2_528 = __p2_528; \
+  float64x2_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 1, 0); \
+  float64x1_t __ret_528; \
+  __ret_528 = __noswap_vfma_laneq_f64(__s0_528, -__s1_528, __rev2_528, __p3_528); \
+  __ret_528; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vfms_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
+  float32x2_t __s0_529 = __p0_529; \
+  float32x2_t __s1_529 = __p1_529; \
+  float32x4_t __s2_529 = __p2_529; \
+  float32x2_t __ret_529; \
+  __ret_529 = vfma_laneq_f32(__s0_529, -__s1_529, __s2_529, __p3_529); \
+  __ret_529; \
+})
+#else
+#define vfms_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
+  float32x2_t __s0_530 = __p0_530; \
+  float32x2_t __s1_530 = __p1_530; \
+  float32x4_t __s2_530 = __p2_530; \
+  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
+  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
+  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
+  float32x2_t __ret_530; \
+  __ret_530 = __noswap_vfma_laneq_f32(__rev0_530, -__rev1_530, __rev2_530, __p3_530); \
+  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
+  __ret_530; \
 })
 #endif
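
Every macro pair in the hunks above follows the same shape: the #ifdef __LITTLE_ENDIAN__ branch operates on its operands directly, while the #else branch reverses each vector's lanes with __builtin_shufflevector, performs the operation through a __noswap_ helper, and reverses the result back so that lane numbering stays consistent on big-endian targets. A minimal standalone sketch of that lane-reversal idiom, using Clang's generic vector extension (the name demo_reverse_f32x4 is illustrative only, not part of arm_neon.h):

#include <stdio.h>

/* 4 x float, shaped like float32x4_t. */
typedef float f32x4 __attribute__((vector_size(16)));

/* Reverse the lane order of a 4-lane vector -- the same idiom the
   big-endian branches apply to their __s0/__s1/__s2 operands and to
   the result before returning it. */
static f32x4 demo_reverse_f32x4(f32x4 v) {
  return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}

int main(void) {
  f32x4 v = {1.0f, 2.0f, 3.0f, 4.0f};
  f32x4 r = demo_reverse_f32x4(v);
  printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); /* prints: 4 3 2 1 */
  return 0;
}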
 
@@ -55164,534 +55352,534 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
-  uint32x4_t __s0_515 = __p0_515; \
-  uint32x4_t __s1_515 = __p1_515; \
-  uint32x4_t __s2_515 = __p2_515; \
-  uint32x4_t __ret_515; \
-  __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \
-  __ret_515; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
-  uint32x4_t __s0_516 = __p0_516; \
-  uint32x4_t __s1_516 = __p1_516; \
-  uint32x4_t __s2_516 = __p2_516; \
-  uint32x4_t __rev0_516;  __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \
-  uint32x4_t __rev1_516;  __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \
-  uint32x4_t __rev2_516;  __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \
-  uint32x4_t __ret_516; \
-  __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \
-  __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \
-  __ret_516; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
-  uint16x8_t __s0_517 = __p0_517; \
-  uint16x8_t __s1_517 = __p1_517; \
-  uint16x8_t __s2_517 = __p2_517; \
-  uint16x8_t __ret_517; \
-  __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \
-  __ret_517; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
-  uint16x8_t __s0_518 = __p0_518; \
-  uint16x8_t __s1_518 = __p1_518; \
-  uint16x8_t __s2_518 = __p2_518; \
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_518;  __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_518;  __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_518; \
-  __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_518; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
-  float32x4_t __s0_519 = __p0_519; \
-  float32x4_t __s1_519 = __p1_519; \
-  float32x4_t __s2_519 = __p2_519; \
-  float32x4_t __ret_519; \
-  __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \
-  __ret_519; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
-  float32x4_t __s0_520 = __p0_520; \
-  float32x4_t __s1_520 = __p1_520; \
-  float32x4_t __s2_520 = __p2_520; \
-  float32x4_t __rev0_520;  __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \
-  float32x4_t __rev1_520;  __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \
-  float32x4_t __rev2_520;  __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \
-  float32x4_t __ret_520; \
-  __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \
-  __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \
-  __ret_520; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
-  int32x4_t __s0_521 = __p0_521; \
-  int32x4_t __s1_521 = __p1_521; \
-  int32x4_t __s2_521 = __p2_521; \
-  int32x4_t __ret_521; \
-  __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \
-  __ret_521; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
-  int32x4_t __s0_522 = __p0_522; \
-  int32x4_t __s1_522 = __p1_522; \
-  int32x4_t __s2_522 = __p2_522; \
-  int32x4_t __rev0_522;  __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \
-  int32x4_t __rev1_522;  __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \
-  int32x4_t __rev2_522;  __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \
-  int32x4_t __ret_522; \
-  __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \
-  __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \
-  __ret_522; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
-  int16x8_t __s0_523 = __p0_523; \
-  int16x8_t __s1_523 = __p1_523; \
-  int16x8_t __s2_523 = __p2_523; \
-  int16x8_t __ret_523; \
-  __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \
-  __ret_523; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
-  int16x8_t __s0_524 = __p0_524; \
-  int16x8_t __s1_524 = __p1_524; \
-  int16x8_t __s2_524 = __p2_524; \
-  int16x8_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_524;  __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_524;  __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_524; \
-  __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_524; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
-  uint32x2_t __s0_525 = __p0_525; \
-  uint32x2_t __s1_525 = __p1_525; \
-  uint32x4_t __s2_525 = __p2_525; \
-  uint32x2_t __ret_525; \
-  __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \
-  __ret_525; \
-})
-#else
-#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
-  uint32x2_t __s0_526 = __p0_526; \
-  uint32x2_t __s1_526 = __p1_526; \
-  uint32x4_t __s2_526 = __p2_526; \
-  uint32x2_t __rev0_526;  __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \
-  uint32x2_t __rev1_526;  __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \
-  uint32x4_t __rev2_526;  __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \
-  uint32x2_t __ret_526; \
-  __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \
-  __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \
-  __ret_526; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
-  uint16x4_t __s0_527 = __p0_527; \
-  uint16x4_t __s1_527 = __p1_527; \
-  uint16x8_t __s2_527 = __p2_527; \
-  uint16x4_t __ret_527; \
-  __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \
-  __ret_527; \
-})
-#else
-#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
-  uint16x4_t __s0_528 = __p0_528; \
-  uint16x4_t __s1_528 = __p1_528; \
-  uint16x8_t __s2_528 = __p2_528; \
-  uint16x4_t __rev0_528;  __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \
-  uint16x4_t __rev1_528;  __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \
-  uint16x8_t __rev2_528;  __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_528; \
-  __ret_528 = __rev0_528 + __rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \
-  __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \
-  __ret_528; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
-  float32x2_t __s0_529 = __p0_529; \
-  float32x2_t __s1_529 = __p1_529; \
-  float32x4_t __s2_529 = __p2_529; \
-  float32x2_t __ret_529; \
-  __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \
-  __ret_529; \
-})
-#else
-#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
-  float32x2_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32x4_t __s2_530 = __p2_530; \
-  float32x2_t __rev0_530;  __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \
-  float32x2_t __rev1_530;  __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \
-  float32x4_t __rev2_530;  __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \
-  float32x2_t __ret_530; \
-  __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \
-  __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \
-  __ret_530; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
-  int32x2_t __s0_531 = __p0_531; \
-  int32x2_t __s1_531 = __p1_531; \
-  int32x4_t __s2_531 = __p2_531; \
-  int32x2_t __ret_531; \
-  __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \
+#define vmlaq_laneq_u32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
+  uint32x4_t __s0_531 = __p0_531; \
+  uint32x4_t __s1_531 = __p1_531; \
+  uint32x4_t __s2_531 = __p2_531; \
+  uint32x4_t __ret_531; \
+  __ret_531 = __s0_531 + __s1_531 * splatq_laneq_u32(__s2_531, __p3_531); \
   __ret_531; \
 })
 #else
-#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
-  int32x2_t __s0_532 = __p0_532; \
-  int32x2_t __s1_532 = __p1_532; \
-  int32x4_t __s2_532 = __p2_532; \
-  int32x2_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \
-  int32x2_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \
-  int32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
-  int32x2_t __ret_532; \
-  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \
-  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \
+#define vmlaq_laneq_u32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
+  uint32x4_t __s0_532 = __p0_532; \
+  uint32x4_t __s1_532 = __p1_532; \
+  uint32x4_t __s2_532 = __p2_532; \
+  uint32x4_t __rev0_532;  __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \
+  uint32x4_t __rev1_532;  __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \
+  uint32x4_t __rev2_532;  __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \
+  uint32x4_t __ret_532; \
+  __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_u32(__rev2_532, __p3_532); \
+  __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \
   __ret_532; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
-  int16x4_t __s0_533 = __p0_533; \
-  int16x4_t __s1_533 = __p1_533; \
-  int16x8_t __s2_533 = __p2_533; \
-  int16x4_t __ret_533; \
-  __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \
+#define vmlaq_laneq_u16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
+  uint16x8_t __s0_533 = __p0_533; \
+  uint16x8_t __s1_533 = __p1_533; \
+  uint16x8_t __s2_533 = __p2_533; \
+  uint16x8_t __ret_533; \
+  __ret_533 = __s0_533 + __s1_533 * splatq_laneq_u16(__s2_533, __p3_533); \
   __ret_533; \
 })
 #else
-#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
-  int16x4_t __s0_534 = __p0_534; \
-  int16x4_t __s1_534 = __p1_534; \
-  int16x8_t __s2_534 = __p2_534; \
-  int16x4_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \
-  int16x4_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \
-  int16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_534; \
-  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \
-  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \
+#define vmlaq_laneq_u16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
+  uint16x8_t __s0_534 = __p0_534; \
+  uint16x8_t __s1_534 = __p1_534; \
+  uint16x8_t __s2_534 = __p2_534; \
+  uint16x8_t __rev0_534;  __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_534;  __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_534;  __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_534; \
+  __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_u16(__rev2_534, __p3_534); \
+  __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_534; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
-  uint64x2_t __s0_535 = __p0_535; \
-  uint32x4_t __s1_535 = __p1_535; \
-  uint32x2_t __s2_535 = __p2_535; \
-  uint64x2_t __ret_535; \
-  __ret_535 = __s0_535 + vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \
+#define vmlaq_laneq_f32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
+  float32x4_t __s0_535 = __p0_535; \
+  float32x4_t __s1_535 = __p1_535; \
+  float32x4_t __s2_535 = __p2_535; \
+  float32x4_t __ret_535; \
+  __ret_535 = __s0_535 + __s1_535 * splatq_laneq_f32(__s2_535, __p3_535); \
   __ret_535; \
 })
 #else
-#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
-  uint64x2_t __s0_536 = __p0_536; \
-  uint32x4_t __s1_536 = __p1_536; \
-  uint32x2_t __s2_536 = __p2_536; \
-  uint64x2_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \
-  uint32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
-  uint32x2_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \
-  uint64x2_t __ret_536; \
-  __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \
-  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \
+#define vmlaq_laneq_f32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
+  float32x4_t __s0_536 = __p0_536; \
+  float32x4_t __s1_536 = __p1_536; \
+  float32x4_t __s2_536 = __p2_536; \
+  float32x4_t __rev0_536;  __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 3, 2, 1, 0); \
+  float32x4_t __rev1_536;  __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \
+  float32x4_t __rev2_536;  __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 3, 2, 1, 0); \
+  float32x4_t __ret_536; \
+  __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_f32(__rev2_536, __p3_536); \
+  __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 3, 2, 1, 0); \
   __ret_536; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
-  uint32x4_t __s0_537 = __p0_537; \
-  uint16x8_t __s1_537 = __p1_537; \
-  uint16x4_t __s2_537 = __p2_537; \
-  uint32x4_t __ret_537; \
-  __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \
+#define vmlaq_laneq_s32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
+  int32x4_t __s0_537 = __p0_537; \
+  int32x4_t __s1_537 = __p1_537; \
+  int32x4_t __s2_537 = __p2_537; \
+  int32x4_t __ret_537; \
+  __ret_537 = __s0_537 + __s1_537 * splatq_laneq_s32(__s2_537, __p3_537); \
   __ret_537; \
 })
 #else
-#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint16x8_t __s1_538 = __p1_538; \
-  uint16x4_t __s2_538 = __p2_538; \
-  uint32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
-  uint16x8_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
-  uint32x4_t __ret_538; \
-  __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \
+#define vmlaq_laneq_s32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
+  int32x4_t __s0_538 = __p0_538; \
+  int32x4_t __s1_538 = __p1_538; \
+  int32x4_t __s2_538 = __p2_538; \
+  int32x4_t __rev0_538;  __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \
+  int32x4_t __rev1_538;  __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 3, 2, 1, 0); \
+  int32x4_t __rev2_538;  __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \
+  int32x4_t __ret_538; \
+  __ret_538 = __rev0_538 + __rev1_538 * __noswap_splatq_laneq_s32(__rev2_538, __p3_538); \
   __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \
   __ret_538; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
-  int64x2_t __s0_539 = __p0_539; \
-  int32x4_t __s1_539 = __p1_539; \
-  int32x2_t __s2_539 = __p2_539; \
-  int64x2_t __ret_539; \
-  __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \
+#define vmlaq_laneq_s16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
+  int16x8_t __s0_539 = __p0_539; \
+  int16x8_t __s1_539 = __p1_539; \
+  int16x8_t __s2_539 = __p2_539; \
+  int16x8_t __ret_539; \
+  __ret_539 = __s0_539 + __s1_539 * splatq_laneq_s16(__s2_539, __p3_539); \
   __ret_539; \
 })
 #else
-#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
-  int64x2_t __s0_540 = __p0_540; \
-  int32x4_t __s1_540 = __p1_540; \
-  int32x2_t __s2_540 = __p2_540; \
-  int64x2_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \
-  int32x4_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \
-  int32x2_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \
-  int64x2_t __ret_540; \
-  __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \
-  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \
+#define vmlaq_laneq_s16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
+  int16x8_t __s0_540 = __p0_540; \
+  int16x8_t __s1_540 = __p1_540; \
+  int16x8_t __s2_540 = __p2_540; \
+  int16x8_t __rev0_540;  __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_540;  __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_540;  __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_540; \
+  __ret_540 = __rev0_540 + __rev1_540 * __noswap_splatq_laneq_s16(__rev2_540, __p3_540); \
+  __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_540; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
-  int32x4_t __s0_541 = __p0_541; \
-  int16x8_t __s1_541 = __p1_541; \
-  int16x4_t __s2_541 = __p2_541; \
-  int32x4_t __ret_541; \
-  __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \
+#define vmla_laneq_u32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
+  uint32x2_t __s0_541 = __p0_541; \
+  uint32x2_t __s1_541 = __p1_541; \
+  uint32x4_t __s2_541 = __p2_541; \
+  uint32x2_t __ret_541; \
+  __ret_541 = __s0_541 + __s1_541 * splat_laneq_u32(__s2_541, __p3_541); \
   __ret_541; \
 })
 #else
-#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
-  int32x4_t __s0_542 = __p0_542; \
-  int16x8_t __s1_542 = __p1_542; \
-  int16x4_t __s2_542 = __p2_542; \
-  int32x4_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \
-  int16x8_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
-  int32x4_t __ret_542; \
-  __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \
-  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \
+#define vmla_laneq_u32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
+  uint32x2_t __s0_542 = __p0_542; \
+  uint32x2_t __s1_542 = __p1_542; \
+  uint32x4_t __s2_542 = __p2_542; \
+  uint32x2_t __rev0_542;  __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \
+  uint32x2_t __rev1_542;  __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \
+  uint32x4_t __rev2_542;  __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \
+  uint32x2_t __ret_542; \
+  __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_u32(__rev2_542, __p3_542); \
+  __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \
   __ret_542; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
-  uint64x2_t __s0_543 = __p0_543; \
-  uint32x4_t __s1_543 = __p1_543; \
-  uint32x4_t __s2_543 = __p2_543; \
-  uint64x2_t __ret_543; \
-  __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \
+#define vmla_laneq_u16(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
+  uint16x4_t __s0_543 = __p0_543; \
+  uint16x4_t __s1_543 = __p1_543; \
+  uint16x8_t __s2_543 = __p2_543; \
+  uint16x4_t __ret_543; \
+  __ret_543 = __s0_543 + __s1_543 * splat_laneq_u16(__s2_543, __p3_543); \
   __ret_543; \
 })
 #else
-#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
-  uint64x2_t __s0_544 = __p0_544; \
-  uint32x4_t __s1_544 = __p1_544; \
-  uint32x4_t __s2_544 = __p2_544; \
-  uint64x2_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \
-  uint32x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
-  uint32x4_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \
-  uint64x2_t __ret_544; \
-  __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \
-  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \
+#define vmla_laneq_u16(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
+  uint16x4_t __s0_544 = __p0_544; \
+  uint16x4_t __s1_544 = __p1_544; \
+  uint16x8_t __s2_544 = __p2_544; \
+  uint16x4_t __rev0_544;  __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 3, 2, 1, 0); \
+  uint16x4_t __rev1_544;  __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \
+  uint16x8_t __rev2_544;  __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_544; \
+  __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_u16(__rev2_544, __p3_544); \
+  __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 3, 2, 1, 0); \
   __ret_544; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
-  uint32x4_t __s0_545 = __p0_545; \
-  uint16x8_t __s1_545 = __p1_545; \
-  uint16x8_t __s2_545 = __p2_545; \
-  uint32x4_t __ret_545; \
-  __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \
+#define vmla_laneq_f32(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
+  float32x2_t __s0_545 = __p0_545; \
+  float32x2_t __s1_545 = __p1_545; \
+  float32x4_t __s2_545 = __p2_545; \
+  float32x2_t __ret_545; \
+  __ret_545 = __s0_545 + __s1_545 * splat_laneq_f32(__s2_545, __p3_545); \
   __ret_545; \
 })
 #else
-#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
-  uint32x4_t __s0_546 = __p0_546; \
-  uint16x8_t __s1_546 = __p1_546; \
-  uint16x8_t __s2_546 = __p2_546; \
-  uint32x4_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \
-  uint16x8_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_546; \
-  __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \
-  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \
+#define vmla_laneq_f32(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
+  float32x2_t __s0_546 = __p0_546; \
+  float32x2_t __s1_546 = __p1_546; \
+  float32x4_t __s2_546 = __p2_546; \
+  float32x2_t __rev0_546;  __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 1, 0); \
+  float32x2_t __rev1_546;  __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 1, 0); \
+  float32x4_t __rev2_546;  __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 3, 2, 1, 0); \
+  float32x2_t __ret_546; \
+  __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_f32(__rev2_546, __p3_546); \
+  __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 1, 0); \
   __ret_546; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
-  int64x2_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
+#define vmla_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
+  int32x2_t __s0_547 = __p0_547; \
+  int32x2_t __s1_547 = __p1_547; \
   int32x4_t __s2_547 = __p2_547; \
-  int64x2_t __ret_547; \
-  __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \
+  int32x2_t __ret_547; \
+  __ret_547 = __s0_547 + __s1_547 * splat_laneq_s32(__s2_547, __p3_547); \
   __ret_547; \
 })
 #else
-#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
-  int64x2_t __s0_548 = __p0_548; \
-  int32x4_t __s1_548 = __p1_548; \
+#define vmla_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
+  int32x2_t __s0_548 = __p0_548; \
+  int32x2_t __s1_548 = __p1_548; \
   int32x4_t __s2_548 = __p2_548; \
-  int64x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
-  int32x4_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \
+  int32x2_t __rev0_548;  __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \
+  int32x2_t __rev1_548;  __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 1, 0); \
   int32x4_t __rev2_548;  __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \
-  int64x2_t __ret_548; \
-  __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \
+  int32x2_t __ret_548; \
+  __ret_548 = __rev0_548 + __rev1_548 * __noswap_splat_laneq_s32(__rev2_548, __p3_548); \
   __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \
   __ret_548; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
-  int32x4_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
+#define vmla_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
+  int16x4_t __s0_549 = __p0_549; \
+  int16x4_t __s1_549 = __p1_549; \
   int16x8_t __s2_549 = __p2_549; \
-  int32x4_t __ret_549; \
-  __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \
+  int16x4_t __ret_549; \
+  __ret_549 = __s0_549 + __s1_549 * splat_laneq_s16(__s2_549, __p3_549); \
   __ret_549; \
 })
 #else
-#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
-  int32x4_t __s0_550 = __p0_550; \
-  int16x8_t __s1_550 = __p1_550; \
+#define vmla_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
+  int16x4_t __s0_550 = __p0_550; \
+  int16x4_t __s1_550 = __p1_550; \
   int16x8_t __s2_550 = __p2_550; \
-  int32x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
-  int16x8_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev0_550;  __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \
+  int16x4_t __rev1_550;  __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 3, 2, 1, 0); \
   int16x8_t __rev2_550;  __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_550; \
-  __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \
+  int16x4_t __ret_550; \
+  __ret_550 = __rev0_550 + __rev1_550 * __noswap_splat_laneq_s16(__rev2_550, __p3_550); \
   __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \
   __ret_550; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
+#define vmlal_high_lane_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
   uint64x2_t __s0_551 = __p0_551; \
-  uint32x2_t __s1_551 = __p1_551; \
-  uint32x4_t __s2_551 = __p2_551; \
+  uint32x4_t __s1_551 = __p1_551; \
+  uint32x2_t __s2_551 = __p2_551; \
   uint64x2_t __ret_551; \
-  __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \
+  __ret_551 = __s0_551 + vmull_u32(vget_high_u32(__s1_551), splat_lane_u32(__s2_551, __p3_551)); \
   __ret_551; \
 })
 #else
-#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
+#define vmlal_high_lane_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \
   uint64x2_t __s0_552 = __p0_552; \
-  uint32x2_t __s1_552 = __p1_552; \
-  uint32x4_t __s2_552 = __p2_552; \
+  uint32x4_t __s1_552 = __p1_552; \
+  uint32x2_t __s2_552 = __p2_552; \
   uint64x2_t __rev0_552;  __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \
-  uint32x2_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \
-  uint32x4_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \
+  uint32x4_t __rev1_552;  __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \
+  uint32x2_t __rev2_552;  __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \
   uint64x2_t __ret_552; \
-  __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \
+  __ret_552 = __rev0_552 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_552), __noswap_splat_lane_u32(__rev2_552, __p3_552)); \
   __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \
   __ret_552; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
+#define vmlal_high_lane_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \
   uint32x4_t __s0_553 = __p0_553; \
-  uint16x4_t __s1_553 = __p1_553; \
-  uint16x8_t __s2_553 = __p2_553; \
+  uint16x8_t __s1_553 = __p1_553; \
+  uint16x4_t __s2_553 = __p2_553; \
   uint32x4_t __ret_553; \
-  __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \
+  __ret_553 = __s0_553 + vmull_u16(vget_high_u16(__s1_553), splat_lane_u16(__s2_553, __p3_553)); \
   __ret_553; \
 })
 #else
-#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
+#define vmlal_high_lane_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \
   uint32x4_t __s0_554 = __p0_554; \
-  uint16x4_t __s1_554 = __p1_554; \
-  uint16x8_t __s2_554 = __p2_554; \
+  uint16x8_t __s1_554 = __p1_554; \
+  uint16x4_t __s2_554 = __p2_554; \
   uint32x4_t __rev0_554;  __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \
-  uint16x4_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \
-  uint16x8_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_554;  __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_554;  __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \
   uint32x4_t __ret_554; \
-  __ret_554 = __rev0_554 + __noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \
+  __ret_554 = __rev0_554 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_554), __noswap_splat_lane_u16(__rev2_554, __p3_554)); \
   __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \
   __ret_554; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
+#define vmlal_high_lane_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \
   int64x2_t __s0_555 = __p0_555; \
-  int32x2_t __s1_555 = __p1_555; \
-  int32x4_t __s2_555 = __p2_555; \
+  int32x4_t __s1_555 = __p1_555; \
+  int32x2_t __s2_555 = __p2_555; \
   int64x2_t __ret_555; \
-  __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \
+  __ret_555 = __s0_555 + vmull_s32(vget_high_s32(__s1_555), splat_lane_s32(__s2_555, __p3_555)); \
   __ret_555; \
 })
 #else
-#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
+#define vmlal_high_lane_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \
   int64x2_t __s0_556 = __p0_556; \
-  int32x2_t __s1_556 = __p1_556; \
-  int32x4_t __s2_556 = __p2_556; \
+  int32x4_t __s1_556 = __p1_556; \
+  int32x2_t __s2_556 = __p2_556; \
   int64x2_t __rev0_556;  __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \
-  int32x2_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \
-  int32x4_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \
+  int32x4_t __rev1_556;  __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \
+  int32x2_t __rev2_556;  __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 1, 0); \
   int64x2_t __ret_556; \
-  __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \
+  __ret_556 = __rev0_556 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_556), __noswap_splat_lane_s32(__rev2_556, __p3_556)); \
   __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \
   __ret_556; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
+#define vmlal_high_lane_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \
   int32x4_t __s0_557 = __p0_557; \
-  int16x4_t __s1_557 = __p1_557; \
-  int16x8_t __s2_557 = __p2_557; \
+  int16x8_t __s1_557 = __p1_557; \
+  int16x4_t __s2_557 = __p2_557; \
   int32x4_t __ret_557; \
-  __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \
+  __ret_557 = __s0_557 + vmull_s16(vget_high_s16(__s1_557), splat_lane_s16(__s2_557, __p3_557)); \
   __ret_557; \
 })
 #else
-#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
+#define vmlal_high_lane_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \
   int32x4_t __s0_558 = __p0_558; \
-  int16x4_t __s1_558 = __p1_558; \
-  int16x8_t __s2_558 = __p2_558; \
+  int16x8_t __s1_558 = __p1_558; \
+  int16x4_t __s2_558 = __p2_558; \
   int32x4_t __rev0_558;  __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \
-  int16x4_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \
-  int16x8_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_558;  __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_558;  __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 3, 2, 1, 0); \
   int32x4_t __ret_558; \
-  __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \
+  __ret_558 = __rev0_558 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_558), __noswap_splat_lane_s16(__rev2_558, __p3_558)); \
   __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \
   __ret_558; \
 })
 #endif
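
The vmlal_high_lane_* group above expands to acc + vmull(vget_high(src), splat_lane(v, lane)): a widening multiply-accumulate of the upper half of src against one broadcast lane of v. A scalar model of the arithmetic the u32 variant performs (model_vmlal_high_lane_u32 is an illustrative helper, not a NEON intrinsic):

#include <stdint.h>
#include <stdio.h>

/* Scalar model: acc[i] += (uint64_t)src[2 + i] * v[lane], i.e. the upper
   two u32 lanes of src are widened to u64 and accumulated. */
static void model_vmlal_high_lane_u32(uint64_t acc[2], const uint32_t src[4],
                                      const uint32_t v[2], int lane) {
  for (int i = 0; i < 2; ++i)
    acc[i] += (uint64_t)src[2 + i] * v[lane];
}

int main(void) {
  uint64_t acc[2] = {1, 2};
  uint32_t src[4] = {10, 20, 30, 40}; /* high half: {30, 40} */
  uint32_t v[2]   = {5, 7};
  model_vmlal_high_lane_u32(acc, src, v, 1);
  printf("%llu %llu\n", (unsigned long long)acc[0],
         (unsigned long long)acc[1]); /* prints: 211 282 */
  return 0;
}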
 
 #ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
+  uint64x2_t __s0_559 = __p0_559; \
+  uint32x4_t __s1_559 = __p1_559; \
+  uint32x4_t __s2_559 = __p2_559; \
+  uint64x2_t __ret_559; \
+  __ret_559 = __s0_559 + vmull_u32(vget_high_u32(__s1_559), splat_laneq_u32(__s2_559, __p3_559)); \
+  __ret_559; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
+  uint64x2_t __s0_560 = __p0_560; \
+  uint32x4_t __s1_560 = __p1_560; \
+  uint32x4_t __s2_560 = __p2_560; \
+  uint64x2_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \
+  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
+  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
+  uint64x2_t __ret_560; \
+  __ret_560 = __rev0_560 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_560), __noswap_splat_laneq_u32(__rev2_560, __p3_560)); \
+  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \
+  __ret_560; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
+  uint32x4_t __s0_561 = __p0_561; \
+  uint16x8_t __s1_561 = __p1_561; \
+  uint16x8_t __s2_561 = __p2_561; \
+  uint32x4_t __ret_561; \
+  __ret_561 = __s0_561 + vmull_u16(vget_high_u16(__s1_561), splat_laneq_u16(__s2_561, __p3_561)); \
+  __ret_561; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
+  uint32x4_t __s0_562 = __p0_562; \
+  uint16x8_t __s1_562 = __p1_562; \
+  uint16x8_t __s2_562 = __p2_562; \
+  uint32x4_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \
+  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_562; \
+  __ret_562 = __rev0_562 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_562), __noswap_splat_laneq_u16(__rev2_562, __p3_562)); \
+  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \
+  __ret_562; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
+  int64x2_t __s0_563 = __p0_563; \
+  int32x4_t __s1_563 = __p1_563; \
+  int32x4_t __s2_563 = __p2_563; \
+  int64x2_t __ret_563; \
+  __ret_563 = __s0_563 + vmull_s32(vget_high_s32(__s1_563), splat_laneq_s32(__s2_563, __p3_563)); \
+  __ret_563; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
+  int64x2_t __s0_564 = __p0_564; \
+  int32x4_t __s1_564 = __p1_564; \
+  int32x4_t __s2_564 = __p2_564; \
+  int64x2_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \
+  int32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
+  int32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
+  int64x2_t __ret_564; \
+  __ret_564 = __rev0_564 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_564), __noswap_splat_laneq_s32(__rev2_564, __p3_564)); \
+  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \
+  __ret_564; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
+  int32x4_t __s0_565 = __p0_565; \
+  int16x8_t __s1_565 = __p1_565; \
+  int16x8_t __s2_565 = __p2_565; \
+  int32x4_t __ret_565; \
+  __ret_565 = __s0_565 + vmull_s16(vget_high_s16(__s1_565), splat_laneq_s16(__s2_565, __p3_565)); \
+  __ret_565; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
+  int32x4_t __s0_566 = __p0_566; \
+  int16x8_t __s1_566 = __p1_566; \
+  int16x8_t __s2_566 = __p2_566; \
+  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
+  int16x8_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_566; \
+  __ret_566 = __rev0_566 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_566), __noswap_splat_laneq_s16(__rev2_566, __p3_566)); \
+  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
+  __ret_566; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
+  uint64x2_t __s0_567 = __p0_567; \
+  uint32x2_t __s1_567 = __p1_567; \
+  uint32x4_t __s2_567 = __p2_567; \
+  uint64x2_t __ret_567; \
+  __ret_567 = __s0_567 + vmull_u32(__s1_567, splat_laneq_u32(__s2_567, __p3_567)); \
+  __ret_567; \
+})
+#else
+#define vmlal_laneq_u32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
+  uint64x2_t __s0_568 = __p0_568; \
+  uint32x2_t __s1_568 = __p1_568; \
+  uint32x4_t __s2_568 = __p2_568; \
+  uint64x2_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \
+  uint32x2_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \
+  uint32x4_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \
+  uint64x2_t __ret_568; \
+  __ret_568 = __rev0_568 + __noswap_vmull_u32(__rev1_568, __noswap_splat_laneq_u32(__rev2_568, __p3_568)); \
+  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \
+  __ret_568; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
+  uint32x4_t __s0_569 = __p0_569; \
+  uint16x4_t __s1_569 = __p1_569; \
+  uint16x8_t __s2_569 = __p2_569; \
+  uint32x4_t __ret_569; \
+  __ret_569 = __s0_569 + vmull_u16(__s1_569, splat_laneq_u16(__s2_569, __p3_569)); \
+  __ret_569; \
+})
+#else
+#define vmlal_laneq_u16(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
+  uint32x4_t __s0_570 = __p0_570; \
+  uint16x4_t __s1_570 = __p1_570; \
+  uint16x8_t __s2_570 = __p2_570; \
+  uint32x4_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \
+  uint16x4_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \
+  uint16x8_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_570; \
+  __ret_570 = __rev0_570 + __noswap_vmull_u16(__rev1_570, __noswap_splat_laneq_u16(__rev2_570, __p3_570)); \
+  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \
+  __ret_570; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
+  int64x2_t __s0_571 = __p0_571; \
+  int32x2_t __s1_571 = __p1_571; \
+  int32x4_t __s2_571 = __p2_571; \
+  int64x2_t __ret_571; \
+  __ret_571 = __s0_571 + vmull_s32(__s1_571, splat_laneq_s32(__s2_571, __p3_571)); \
+  __ret_571; \
+})
+#else
+#define vmlal_laneq_s32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
+  int64x2_t __s0_572 = __p0_572; \
+  int32x2_t __s1_572 = __p1_572; \
+  int32x4_t __s2_572 = __p2_572; \
+  int64x2_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 1, 0); \
+  int32x2_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 1, 0); \
+  int32x4_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \
+  int64x2_t __ret_572; \
+  __ret_572 = __rev0_572 + __noswap_vmull_s32(__rev1_572, __noswap_splat_laneq_s32(__rev2_572, __p3_572)); \
+  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 1, 0); \
+  __ret_572; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
+  int32x4_t __s0_573 = __p0_573; \
+  int16x4_t __s1_573 = __p1_573; \
+  int16x8_t __s2_573 = __p2_573; \
+  int32x4_t __ret_573; \
+  __ret_573 = __s0_573 + vmull_s16(__s1_573, splat_laneq_s16(__s2_573, __p3_573)); \
+  __ret_573; \
+})
+#else
+#define vmlal_laneq_s16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
+  int32x4_t __s0_574 = __p0_574; \
+  int16x4_t __s1_574 = __p1_574; \
+  int16x8_t __s2_574 = __p2_574; \
+  int32x4_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 3, 2, 1, 0); \
+  int16x4_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 3, 2, 1, 0); \
+  int16x8_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_574; \
+  __ret_574 = __rev0_574 + __noswap_vmull_s16(__rev1_574, __noswap_splat_laneq_s16(__rev2_574, __p3_574)); \
+  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 3, 2, 1, 0); \
+  __ret_574; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
   float64x2_t __ret;
   __ret = __p0 - __p1 * __p2;
@@ -55715,533 +55903,533 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \
-  uint32x4_t __s0_559 = __p0_559; \
-  uint32x4_t __s1_559 = __p1_559; \
-  uint32x4_t __s2_559 = __p2_559; \
-  uint32x4_t __ret_559; \
-  __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \
-  __ret_559; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x4_t __s1_560 = __p1_560; \
-  uint32x4_t __s2_560 = __p2_560; \
-  uint32x4_t __rev0_560;  __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \
-  uint32x4_t __rev1_560;  __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \
-  uint32x4_t __rev2_560;  __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \
-  uint32x4_t __ret_560; \
-  __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \
-  __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \
-  __ret_560; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \
-  uint16x8_t __s0_561 = __p0_561; \
-  uint16x8_t __s1_561 = __p1_561; \
-  uint16x8_t __s2_561 = __p2_561; \
-  uint16x8_t __ret_561; \
-  __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \
-  __ret_561; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x8_t __s1_562 = __p1_562; \
-  uint16x8_t __s2_562 = __p2_562; \
-  uint16x8_t __rev0_562;  __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_562;  __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_562;  __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_562; \
-  __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \
-  __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_562; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \
-  float32x4_t __s0_563 = __p0_563; \
-  float32x4_t __s1_563 = __p1_563; \
-  float32x4_t __s2_563 = __p2_563; \
-  float32x4_t __ret_563; \
-  __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \
-  __ret_563; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \
-  float32x4_t __s0_564 = __p0_564; \
-  float32x4_t __s1_564 = __p1_564; \
-  float32x4_t __s2_564 = __p2_564; \
-  float32x4_t __rev0_564;  __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \
-  float32x4_t __rev1_564;  __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \
-  float32x4_t __rev2_564;  __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \
-  float32x4_t __ret_564; \
-  __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \
-  __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \
-  __ret_564; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x4_t __s1_565 = __p1_565; \
-  int32x4_t __s2_565 = __p2_565; \
-  int32x4_t __ret_565; \
-  __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \
-  __ret_565; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \
-  int32x4_t __s0_566 = __p0_566; \
-  int32x4_t __s1_566 = __p1_566; \
-  int32x4_t __s2_566 = __p2_566; \
-  int32x4_t __rev0_566;  __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \
-  int32x4_t __rev1_566;  __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \
-  int32x4_t __rev2_566;  __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 3, 2, 1, 0); \
-  int32x4_t __ret_566; \
-  __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \
-  __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \
-  __ret_566; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x8_t __s1_567 = __p1_567; \
-  int16x8_t __s2_567 = __p2_567; \
-  int16x8_t __ret_567; \
-  __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \
-  __ret_567; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \
-  int16x8_t __s0_568 = __p0_568; \
-  int16x8_t __s1_568 = __p1_568; \
-  int16x8_t __s2_568 = __p2_568; \
-  int16x8_t __rev0_568;  __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_568;  __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_568;  __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_568; \
-  __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \
-  __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_568; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \
-  uint32x2_t __s0_569 = __p0_569; \
-  uint32x2_t __s1_569 = __p1_569; \
-  uint32x4_t __s2_569 = __p2_569; \
-  uint32x2_t __ret_569; \
-  __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \
-  __ret_569; \
-})
-#else
-#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \
-  uint32x2_t __s0_570 = __p0_570; \
-  uint32x2_t __s1_570 = __p1_570; \
-  uint32x4_t __s2_570 = __p2_570; \
-  uint32x2_t __rev0_570;  __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \
-  uint32x2_t __rev1_570;  __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \
-  uint32x4_t __rev2_570;  __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \
-  uint32x2_t __ret_570; \
-  __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \
-  __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \
-  __ret_570; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \
-  uint16x4_t __s0_571 = __p0_571; \
-  uint16x4_t __s1_571 = __p1_571; \
-  uint16x8_t __s2_571 = __p2_571; \
-  uint16x4_t __ret_571; \
-  __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \
-  __ret_571; \
-})
-#else
-#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \
-  uint16x4_t __s0_572 = __p0_572; \
-  uint16x4_t __s1_572 = __p1_572; \
-  uint16x8_t __s2_572 = __p2_572; \
-  uint16x4_t __rev0_572;  __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \
-  uint16x4_t __rev1_572;  __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \
-  uint16x8_t __rev2_572;  __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_572; \
-  __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \
-  __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \
-  __ret_572; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \
-  float32x2_t __s0_573 = __p0_573; \
-  float32x2_t __s1_573 = __p1_573; \
-  float32x4_t __s2_573 = __p2_573; \
-  float32x2_t __ret_573; \
-  __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \
-  __ret_573; \
-})
-#else
-#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \
-  float32x2_t __s0_574 = __p0_574; \
-  float32x2_t __s1_574 = __p1_574; \
-  float32x4_t __s2_574 = __p2_574; \
-  float32x2_t __rev0_574;  __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \
-  float32x2_t __rev1_574;  __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \
-  float32x4_t __rev2_574;  __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \
-  float32x2_t __ret_574; \
-  __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \
-  __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \
-  __ret_574; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
-  int32x2_t __s0_575 = __p0_575; \
-  int32x2_t __s1_575 = __p1_575; \
-  int32x4_t __s2_575 = __p2_575; \
-  int32x2_t __ret_575; \
-  __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \
+#define vmlsq_laneq_u32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \
+  uint32x4_t __s0_575 = __p0_575; \
+  uint32x4_t __s1_575 = __p1_575; \
+  uint32x4_t __s2_575 = __p2_575; \
+  uint32x4_t __ret_575; \
+  __ret_575 = __s0_575 - __s1_575 * splatq_laneq_u32(__s2_575, __p3_575); \
   __ret_575; \
 })
 #else
-#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
-  int32x2_t __s0_576 = __p0_576; \
-  int32x2_t __s1_576 = __p1_576; \
-  int32x4_t __s2_576 = __p2_576; \
-  int32x2_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \
-  int32x2_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \
-  int32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
-  int32x2_t __ret_576; \
-  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \
-  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \
+#define vmlsq_laneq_u32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \
+  uint32x4_t __s0_576 = __p0_576; \
+  uint32x4_t __s1_576 = __p1_576; \
+  uint32x4_t __s2_576 = __p2_576; \
+  uint32x4_t __rev0_576;  __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \
+  uint32x4_t __rev1_576;  __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \
+  uint32x4_t __rev2_576;  __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \
+  uint32x4_t __ret_576; \
+  __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_u32(__rev2_576, __p3_576); \
+  __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \
   __ret_576; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
-  int16x4_t __s0_577 = __p0_577; \
-  int16x4_t __s1_577 = __p1_577; \
-  int16x8_t __s2_577 = __p2_577; \
-  int16x4_t __ret_577; \
-  __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \
+#define vmlsq_laneq_u16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \
+  uint16x8_t __s0_577 = __p0_577; \
+  uint16x8_t __s1_577 = __p1_577; \
+  uint16x8_t __s2_577 = __p2_577; \
+  uint16x8_t __ret_577; \
+  __ret_577 = __s0_577 - __s1_577 * splatq_laneq_u16(__s2_577, __p3_577); \
   __ret_577; \
 })
 #else
-#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
-  int16x4_t __s0_578 = __p0_578; \
-  int16x4_t __s1_578 = __p1_578; \
-  int16x8_t __s2_578 = __p2_578; \
-  int16x4_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \
-  int16x4_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \
-  int16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_578; \
-  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \
-  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \
+#define vmlsq_laneq_u16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \
+  uint16x8_t __s0_578 = __p0_578; \
+  uint16x8_t __s1_578 = __p1_578; \
+  uint16x8_t __s2_578 = __p2_578; \
+  uint16x8_t __rev0_578;  __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_578;  __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_578;  __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_578; \
+  __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_u16(__rev2_578, __p3_578); \
+  __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_578; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
-  uint64x2_t __s0_579 = __p0_579; \
-  uint32x4_t __s1_579 = __p1_579; \
-  uint32x2_t __s2_579 = __p2_579; \
-  uint64x2_t __ret_579; \
-  __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \
+#define vmlsq_laneq_f32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \
+  float32x4_t __s0_579 = __p0_579; \
+  float32x4_t __s1_579 = __p1_579; \
+  float32x4_t __s2_579 = __p2_579; \
+  float32x4_t __ret_579; \
+  __ret_579 = __s0_579 - __s1_579 * splatq_laneq_f32(__s2_579, __p3_579); \
   __ret_579; \
 })
 #else
-#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
-  uint64x2_t __s0_580 = __p0_580; \
-  uint32x4_t __s1_580 = __p1_580; \
-  uint32x2_t __s2_580 = __p2_580; \
-  uint64x2_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \
-  uint32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
-  uint32x2_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \
-  uint64x2_t __ret_580; \
-  __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \
-  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \
+#define vmlsq_laneq_f32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \
+  float32x4_t __s0_580 = __p0_580; \
+  float32x4_t __s1_580 = __p1_580; \
+  float32x4_t __s2_580 = __p2_580; \
+  float32x4_t __rev0_580;  __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 3, 2, 1, 0); \
+  float32x4_t __rev1_580;  __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \
+  float32x4_t __rev2_580;  __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 3, 2, 1, 0); \
+  float32x4_t __ret_580; \
+  __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_f32(__rev2_580, __p3_580); \
+  __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 3, 2, 1, 0); \
   __ret_580; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
-  uint32x4_t __s0_581 = __p0_581; \
-  uint16x8_t __s1_581 = __p1_581; \
-  uint16x4_t __s2_581 = __p2_581; \
-  uint32x4_t __ret_581; \
-  __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \
+#define vmlsq_laneq_s32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \
+  int32x4_t __s0_581 = __p0_581; \
+  int32x4_t __s1_581 = __p1_581; \
+  int32x4_t __s2_581 = __p2_581; \
+  int32x4_t __ret_581; \
+  __ret_581 = __s0_581 - __s1_581 * splatq_laneq_s32(__s2_581, __p3_581); \
   __ret_581; \
 })
 #else
-#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
-  uint32x4_t __s0_582 = __p0_582; \
-  uint16x8_t __s1_582 = __p1_582; \
-  uint16x4_t __s2_582 = __p2_582; \
-  uint32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
-  uint16x8_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
-  uint32x4_t __ret_582; \
-  __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \
+#define vmlsq_laneq_s32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \
+  int32x4_t __s0_582 = __p0_582; \
+  int32x4_t __s1_582 = __p1_582; \
+  int32x4_t __s2_582 = __p2_582; \
+  int32x4_t __rev0_582;  __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \
+  int32x4_t __rev1_582;  __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \
+  int32x4_t __rev2_582;  __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \
+  int32x4_t __ret_582; \
+  __ret_582 = __rev0_582 - __rev1_582 * __noswap_splatq_laneq_s32(__rev2_582, __p3_582); \
   __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \
   __ret_582; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
-  int64x2_t __s0_583 = __p0_583; \
-  int32x4_t __s1_583 = __p1_583; \
-  int32x2_t __s2_583 = __p2_583; \
-  int64x2_t __ret_583; \
-  __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \
+#define vmlsq_laneq_s16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \
+  int16x8_t __s0_583 = __p0_583; \
+  int16x8_t __s1_583 = __p1_583; \
+  int16x8_t __s2_583 = __p2_583; \
+  int16x8_t __ret_583; \
+  __ret_583 = __s0_583 - __s1_583 * splatq_laneq_s16(__s2_583, __p3_583); \
   __ret_583; \
 })
 #else
-#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
-  int64x2_t __s0_584 = __p0_584; \
-  int32x4_t __s1_584 = __p1_584; \
-  int32x2_t __s2_584 = __p2_584; \
-  int64x2_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \
-  int32x4_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \
-  int32x2_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \
-  int64x2_t __ret_584; \
-  __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \
-  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \
+#define vmlsq_laneq_s16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \
+  int16x8_t __s0_584 = __p0_584; \
+  int16x8_t __s1_584 = __p1_584; \
+  int16x8_t __s2_584 = __p2_584; \
+  int16x8_t __rev0_584;  __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_584;  __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_584;  __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_584; \
+  __ret_584 = __rev0_584 - __rev1_584 * __noswap_splatq_laneq_s16(__rev2_584, __p3_584); \
+  __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_584; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
-  int32x4_t __s0_585 = __p0_585; \
-  int16x8_t __s1_585 = __p1_585; \
-  int16x4_t __s2_585 = __p2_585; \
-  int32x4_t __ret_585; \
-  __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \
+#define vmls_laneq_u32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \
+  uint32x2_t __s0_585 = __p0_585; \
+  uint32x2_t __s1_585 = __p1_585; \
+  uint32x4_t __s2_585 = __p2_585; \
+  uint32x2_t __ret_585; \
+  __ret_585 = __s0_585 - __s1_585 * splat_laneq_u32(__s2_585, __p3_585); \
   __ret_585; \
 })
 #else
-#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
-  int32x4_t __s0_586 = __p0_586; \
-  int16x8_t __s1_586 = __p1_586; \
-  int16x4_t __s2_586 = __p2_586; \
-  int32x4_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \
-  int16x8_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
-  int32x4_t __ret_586; \
-  __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \
-  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \
+#define vmls_laneq_u32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \
+  uint32x2_t __s0_586 = __p0_586; \
+  uint32x2_t __s1_586 = __p1_586; \
+  uint32x4_t __s2_586 = __p2_586; \
+  uint32x2_t __rev0_586;  __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \
+  uint32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  uint32x4_t __rev2_586;  __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \
+  uint32x2_t __ret_586; \
+  __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_u32(__rev2_586, __p3_586); \
+  __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \
   __ret_586; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
-  uint64x2_t __s0_587 = __p0_587; \
-  uint32x4_t __s1_587 = __p1_587; \
-  uint32x4_t __s2_587 = __p2_587; \
-  uint64x2_t __ret_587; \
-  __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \
+#define vmls_laneq_u16(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \
+  uint16x4_t __s0_587 = __p0_587; \
+  uint16x4_t __s1_587 = __p1_587; \
+  uint16x8_t __s2_587 = __p2_587; \
+  uint16x4_t __ret_587; \
+  __ret_587 = __s0_587 - __s1_587 * splat_laneq_u16(__s2_587, __p3_587); \
   __ret_587; \
 })
 #else
-#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
-  uint64x2_t __s0_588 = __p0_588; \
-  uint32x4_t __s1_588 = __p1_588; \
-  uint32x4_t __s2_588 = __p2_588; \
-  uint64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  uint32x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
-  uint32x4_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \
-  uint64x2_t __ret_588; \
-  __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+#define vmls_laneq_u16(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \
+  uint16x4_t __s0_588 = __p0_588; \
+  uint16x4_t __s1_588 = __p1_588; \
+  uint16x8_t __s2_588 = __p2_588; \
+  uint16x4_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 3, 2, 1, 0); \
+  uint16x4_t __rev1_588;  __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \
+  uint16x8_t __rev2_588;  __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_588; \
+  __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_u16(__rev2_588, __p3_588); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \
   __ret_588; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
-  uint32x4_t __s0_589 = __p0_589; \
-  uint16x8_t __s1_589 = __p1_589; \
-  uint16x8_t __s2_589 = __p2_589; \
-  uint32x4_t __ret_589; \
-  __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \
+#define vmls_laneq_f32(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \
+  float32x2_t __s0_589 = __p0_589; \
+  float32x2_t __s1_589 = __p1_589; \
+  float32x4_t __s2_589 = __p2_589; \
+  float32x2_t __ret_589; \
+  __ret_589 = __s0_589 - __s1_589 * splat_laneq_f32(__s2_589, __p3_589); \
   __ret_589; \
 })
 #else
-#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
-  uint32x4_t __s0_590 = __p0_590; \
-  uint16x8_t __s1_590 = __p1_590; \
-  uint16x8_t __s2_590 = __p2_590; \
-  uint32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  uint16x8_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_590; \
-  __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+#define vmls_laneq_f32(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \
+  float32x2_t __s0_590 = __p0_590; \
+  float32x2_t __s1_590 = __p1_590; \
+  float32x4_t __s2_590 = __p2_590; \
+  float32x2_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 1, 0); \
+  float32x2_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \
+  float32x4_t __rev2_590;  __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 3, 2, 1, 0); \
+  float32x2_t __ret_590; \
+  __ret_590 = __rev0_590 - __rev1_590 * __noswap_splat_laneq_f32(__rev2_590, __p3_590); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \
   __ret_590; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
-  int64x2_t __s0_591 = __p0_591; \
-  int32x4_t __s1_591 = __p1_591; \
+#define vmls_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \
+  int32x2_t __s0_591 = __p0_591; \
+  int32x2_t __s1_591 = __p1_591; \
   int32x4_t __s2_591 = __p2_591; \
-  int64x2_t __ret_591; \
-  __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \
+  int32x2_t __ret_591; \
+  __ret_591 = __s0_591 - __s1_591 * splat_laneq_s32(__s2_591, __p3_591); \
   __ret_591; \
 })
 #else
-#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
-  int64x2_t __s0_592 = __p0_592; \
-  int32x4_t __s1_592 = __p1_592; \
+#define vmls_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \
+  int32x2_t __s0_592 = __p0_592; \
+  int32x2_t __s1_592 = __p1_592; \
   int32x4_t __s2_592 = __p2_592; \
-  int64x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  int32x4_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \
+  int32x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  int32x2_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \
   int32x4_t __rev2_592;  __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \
-  int64x2_t __ret_592; \
-  __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), __noswap_splat_laneq_s32(__rev2_592, __p3_592)); \
+  int32x2_t __ret_592; \
+  __ret_592 = __rev0_592 - __rev1_592 * __noswap_splat_laneq_s32(__rev2_592, __p3_592); \
   __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
   __ret_592; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
-  int32x4_t __s0_593 = __p0_593; \
-  int16x8_t __s1_593 = __p1_593; \
+#define vmls_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \
+  int16x4_t __s0_593 = __p0_593; \
+  int16x4_t __s1_593 = __p1_593; \
   int16x8_t __s2_593 = __p2_593; \
-  int32x4_t __ret_593; \
-  __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \
+  int16x4_t __ret_593; \
+  __ret_593 = __s0_593 - __s1_593 * splat_laneq_s16(__s2_593, __p3_593); \
   __ret_593; \
 })
 #else
-#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
-  int32x4_t __s0_594 = __p0_594; \
-  int16x8_t __s1_594 = __p1_594; \
+#define vmls_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \
+  int16x4_t __s0_594 = __p0_594; \
+  int16x4_t __s1_594 = __p1_594; \
   int16x8_t __s2_594 = __p2_594; \
-  int32x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
-  int16x8_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev0_594;  __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \
+  int16x4_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \
   int16x8_t __rev2_594;  __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_594; \
-  __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \
+  int16x4_t __ret_594; \
+  __ret_594 = __rev0_594 - __rev1_594 * __noswap_splat_laneq_s16(__rev2_594, __p3_594); \
   __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \
   __ret_594; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
+#define vmlsl_high_lane_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \
   uint64x2_t __s0_595 = __p0_595; \
-  uint32x2_t __s1_595 = __p1_595; \
-  uint32x4_t __s2_595 = __p2_595; \
+  uint32x4_t __s1_595 = __p1_595; \
+  uint32x2_t __s2_595 = __p2_595; \
   uint64x2_t __ret_595; \
-  __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \
+  __ret_595 = __s0_595 - vmull_u32(vget_high_u32(__s1_595), splat_lane_u32(__s2_595, __p3_595)); \
   __ret_595; \
 })
 #else
-#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
+#define vmlsl_high_lane_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \
   uint64x2_t __s0_596 = __p0_596; \
-  uint32x2_t __s1_596 = __p1_596; \
-  uint32x4_t __s2_596 = __p2_596; \
+  uint32x4_t __s1_596 = __p1_596; \
+  uint32x2_t __s2_596 = __p2_596; \
   uint64x2_t __rev0_596;  __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \
-  uint32x2_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \
-  uint32x4_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \
+  uint32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  uint32x2_t __rev2_596;  __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \
   uint64x2_t __ret_596; \
-  __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \
+  __ret_596 = __rev0_596 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_596), __noswap_splat_lane_u32(__rev2_596, __p3_596)); \
   __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \
   __ret_596; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
+#define vmlsl_high_lane_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \
   uint32x4_t __s0_597 = __p0_597; \
-  uint16x4_t __s1_597 = __p1_597; \
-  uint16x8_t __s2_597 = __p2_597; \
+  uint16x8_t __s1_597 = __p1_597; \
+  uint16x4_t __s2_597 = __p2_597; \
   uint32x4_t __ret_597; \
-  __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \
+  __ret_597 = __s0_597 - vmull_u16(vget_high_u16(__s1_597), splat_lane_u16(__s2_597, __p3_597)); \
   __ret_597; \
 })
 #else
-#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
+#define vmlsl_high_lane_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \
   uint32x4_t __s0_598 = __p0_598; \
-  uint16x4_t __s1_598 = __p1_598; \
-  uint16x8_t __s2_598 = __p2_598; \
+  uint16x8_t __s1_598 = __p1_598; \
+  uint16x4_t __s2_598 = __p2_598; \
   uint32x4_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \
-  uint16x4_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \
-  uint16x8_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_598;  __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \
   uint32x4_t __ret_598; \
-  __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \
+  __ret_598 = __rev0_598 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_598), __noswap_splat_lane_u16(__rev2_598, __p3_598)); \
   __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \
   __ret_598; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
+#define vmlsl_high_lane_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \
   int64x2_t __s0_599 = __p0_599; \
-  int32x2_t __s1_599 = __p1_599; \
-  int32x4_t __s2_599 = __p2_599; \
+  int32x4_t __s1_599 = __p1_599; \
+  int32x2_t __s2_599 = __p2_599; \
   int64x2_t __ret_599; \
-  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
+  __ret_599 = __s0_599 - vmull_s32(vget_high_s32(__s1_599), splat_lane_s32(__s2_599, __p3_599)); \
   __ret_599; \
 })
 #else
-#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
+#define vmlsl_high_lane_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
   int64x2_t __s0_600 = __p0_600; \
-  int32x2_t __s1_600 = __p1_600; \
-  int32x4_t __s2_600 = __p2_600; \
+  int32x4_t __s1_600 = __p1_600; \
+  int32x2_t __s2_600 = __p2_600; \
   int64x2_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
-  int32x2_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
-  int32x4_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
+  int32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  int32x2_t __rev2_600;  __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 1, 0); \
   int64x2_t __ret_600; \
-  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
+  __ret_600 = __rev0_600 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_600), __noswap_splat_lane_s32(__rev2_600, __p3_600)); \
   __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
   __ret_600; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
+#define vmlsl_high_lane_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
   int32x4_t __s0_601 = __p0_601; \
-  int16x4_t __s1_601 = __p1_601; \
-  int16x8_t __s2_601 = __p2_601; \
+  int16x8_t __s1_601 = __p1_601; \
+  int16x4_t __s2_601 = __p2_601; \
   int32x4_t __ret_601; \
-  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
+  __ret_601 = __s0_601 - vmull_s16(vget_high_s16(__s1_601), splat_lane_s16(__s2_601, __p3_601)); \
   __ret_601; \
 })
 #else
-#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
+#define vmlsl_high_lane_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
   int32x4_t __s0_602 = __p0_602; \
-  int16x4_t __s1_602 = __p1_602; \
-  int16x8_t __s2_602 = __p2_602; \
+  int16x8_t __s1_602 = __p1_602; \
+  int16x4_t __s2_602 = __p2_602; \
   int32x4_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
-  int16x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  int16x8_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_602;  __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 3, 2, 1, 0); \
   int32x4_t __ret_602; \
-  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
+  __ret_602 = __rev0_602 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_602), __noswap_splat_lane_s16(__rev2_602, __p3_602)); \
   __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
   __ret_602; \
 })
 #endif
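
Note: each vmlsl_high_lane_* macro above composes three existing
operations: vget_high_* extracts the upper half of the wide operand, the
internal splat_lane_* helper broadcasts one lane of the third argument,
and the widening vmull_* product is subtracted from the accumulator. A
minimal sketch with public intrinsics (the wrapper name is hypothetical,
vdup_lane_u32 stands in for the header's internal splat_lane_u32, and the
lane is fixed at 0 for illustration):

  #include <arm_neon.h>

  /* What vmlsl_high_lane_u32(acc, a, v, 0) computes on a little-endian
   * target: widen-multiply the high half of a against a broadcast lane
   * of v, then subtract the product from the 64-bit accumulator. */
  static inline uint64x2_t mlsl_high_lane0_sketch(uint64x2_t acc,
                                                  uint32x4_t a,
                                                  uint32x2_t v) {
    return vsubq_u64(acc, vmull_u32(vget_high_u32(a), vdup_lane_u32(v, 0)));
  }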
 
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  uint64x2_t __s0_603 = __p0_603; \
+  uint32x4_t __s1_603 = __p1_603; \
+  uint32x4_t __s2_603 = __p2_603; \
+  uint64x2_t __ret_603; \
+  __ret_603 = __s0_603 - vmull_u32(vget_high_u32(__s1_603), splat_laneq_u32(__s2_603, __p3_603)); \
+  __ret_603; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  uint64x2_t __s0_604 = __p0_604; \
+  uint32x4_t __s1_604 = __p1_604; \
+  uint32x4_t __s2_604 = __p2_604; \
+  uint64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  uint32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  uint32x4_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \
+  uint64x2_t __ret_604; \
+  __ret_604 = __rev0_604 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_604), __noswap_splat_laneq_u32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  uint32x4_t __s0_605 = __p0_605; \
+  uint16x8_t __s1_605 = __p1_605; \
+  uint16x8_t __s2_605 = __p2_605; \
+  uint32x4_t __ret_605; \
+  __ret_605 = __s0_605 - vmull_u16(vget_high_u16(__s1_605), splat_laneq_u16(__s2_605, __p3_605)); \
+  __ret_605; \
+})
+#else
+#define vmlsl_high_laneq_u16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  uint32x4_t __s0_606 = __p0_606; \
+  uint16x8_t __s1_606 = __p1_606; \
+  uint16x8_t __s2_606 = __p2_606; \
+  uint32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  uint16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_606; \
+  __ret_606 = __rev0_606 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_606), __noswap_splat_laneq_u16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  int64x2_t __s0_607 = __p0_607; \
+  int32x4_t __s1_607 = __p1_607; \
+  int32x4_t __s2_607 = __p2_607; \
+  int64x2_t __ret_607; \
+  __ret_607 = __s0_607 - vmull_s32(vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \
+  __ret_607; \
+})
+#else
+#define vmlsl_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  int64x2_t __s0_608 = __p0_608; \
+  int32x4_t __s1_608 = __p1_608; \
+  int32x4_t __s2_608 = __p2_608; \
+  int64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  int32x4_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \
+  int32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  int64x2_t __ret_608; \
+  __ret_608 = __rev0_608 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  int32x4_t __s0_609 = __p0_609; \
+  int16x8_t __s1_609 = __p1_609; \
+  int16x8_t __s2_609 = __p2_609; \
+  int32x4_t __ret_609; \
+  __ret_609 = __s0_609 - vmull_s16(vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \
+  __ret_609; \
+})
+#else
+#define vmlsl_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  int32x4_t __s0_610 = __p0_610; \
+  int16x8_t __s1_610 = __p1_610; \
+  int16x8_t __s2_610 = __p2_610; \
+  int32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  int16x8_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_610; \
+  __ret_610 = __rev0_610 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  uint64x2_t __s0_611 = __p0_611; \
+  uint32x2_t __s1_611 = __p1_611; \
+  uint32x4_t __s2_611 = __p2_611; \
+  uint64x2_t __ret_611; \
+  __ret_611 = __s0_611 - vmull_u32(__s1_611, splat_laneq_u32(__s2_611, __p3_611)); \
+  __ret_611; \
+})
+#else
+#define vmlsl_laneq_u32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  uint64x2_t __s0_612 = __p0_612; \
+  uint32x2_t __s1_612 = __p1_612; \
+  uint32x4_t __s2_612 = __p2_612; \
+  uint64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  uint32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  uint32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  uint64x2_t __ret_612; \
+  __ret_612 = __rev0_612 - __noswap_vmull_u32(__rev1_612, __noswap_splat_laneq_u32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_u16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  uint32x4_t __s0_613 = __p0_613; \
+  uint16x4_t __s1_613 = __p1_613; \
+  uint16x8_t __s2_613 = __p2_613; \
+  uint32x4_t __ret_613; \
+  __ret_613 = __s0_613 - vmull_u16(__s1_613, splat_laneq_u16(__s2_613, __p3_613)); \
+  __ret_613; \
+})
+#else
+#define vmlsl_laneq_u16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  uint32x4_t __s0_614 = __p0_614; \
+  uint16x4_t __s1_614 = __p1_614; \
+  uint16x8_t __s2_614 = __p2_614; \
+  uint32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  uint16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  uint16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_614; \
+  __ret_614 = __rev0_614 - __noswap_vmull_u16(__rev1_614, __noswap_splat_laneq_u16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \
+  int64x2_t __s0_615 = __p0_615; \
+  int32x2_t __s1_615 = __p1_615; \
+  int32x4_t __s2_615 = __p2_615; \
+  int64x2_t __ret_615; \
+  __ret_615 = __s0_615 - vmull_s32(__s1_615, splat_laneq_s32(__s2_615, __p3_615)); \
+  __ret_615; \
+})
+#else
+#define vmlsl_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \
+  int64x2_t __s0_616 = __p0_616; \
+  int32x2_t __s1_616 = __p1_616; \
+  int32x4_t __s2_616 = __p2_616; \
+  int64x2_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \
+  int32x2_t __rev1_616;  __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \
+  int32x4_t __rev2_616;  __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \
+  int64x2_t __ret_616; \
+  __ret_616 = __rev0_616 - __noswap_vmull_s32(__rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \
+  __ret_616; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \
+  int32x4_t __s0_617 = __p0_617; \
+  int16x4_t __s1_617 = __p1_617; \
+  int16x8_t __s2_617 = __p2_617; \
+  int32x4_t __ret_617; \
+  __ret_617 = __s0_617 - vmull_s16(__s1_617, splat_laneq_s16(__s2_617, __p3_617)); \
+  __ret_617; \
+})
+#else
+#define vmlsl_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \
+  int32x4_t __s0_618 = __p0_618; \
+  int16x4_t __s1_618 = __p1_618; \
+  int16x8_t __s2_618 = __p2_618; \
+  int32x4_t __rev0_618;  __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \
+  int16x4_t __rev1_618;  __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \
+  int16x8_t __rev2_618;  __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_618; \
+  __ret_618 = __rev0_618 - __noswap_vmull_s16(__rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \
+  __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \
+  __ret_618; \
+})
+#endif
+
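
Every macro in this block follows the same dual-definition pattern: the
__LITTLE_ENDIAN__ branch operates on the arguments directly, while the
big-endian branch lane-reverses each operand with __builtin_shufflevector,
performs the identical arithmetic through the __noswap_* helpers, and
reverses the result back so lane indices keep their architectural meaning.
A condensed sketch of the big-endian shape, using clang vector extensions
(the type alias, wrapper name, and fixed lane index are illustrative, not
part of the header):

  #include <stdint.h>

  typedef int32_t int32x4 __attribute__((vector_size(16)));

  /* Big-endian shape of vmlsq_laneq_s32(acc, a, v, 2): reverse the
   * operands, compute on the reversed lanes, reverse the result back. */
  static inline int32x4 mls_lane2_sketch(int32x4 acc, int32x4 a, int32x4 v) {
    int32x4 racc = __builtin_shufflevector(acc, acc, 3, 2, 1, 0);
    int32x4 ra   = __builtin_shufflevector(a,   a,   3, 2, 1, 0);
    int32x4 rv   = __builtin_shufflevector(v,   v,   3, 2, 1, 0);
    int32x4 dup  = __builtin_shufflevector(rv,  rv,  2, 2, 2, 2); /* splat lane 2 */
    int32x4 r    = racc - ra * dup;
    return __builtin_shufflevector(r, r, 3, 2, 1, 0);
  }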
 __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
   poly64x1_t __ret;
   __ret = (poly64x1_t) {__p0};
@@ -56283,147 +56471,147 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
-  uint16x8_t __ret_603;
-  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
-  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
-  return __ret_603;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
-  uint8x16_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_604;
-  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
-  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_604;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
-  uint16x8_t __ret_605;
-  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
-  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
-  return __ret_605;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
-  uint64x2_t __ret_606;
-  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
-  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
-  return __ret_606;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
-  uint32x4_t __rev0_607;  __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
-  uint64x2_t __ret_607;
-  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
-  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
-  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
-  return __ret_607;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
-  uint64x2_t __ret_608;
-  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
-  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
-  return __ret_608;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
-  uint32x4_t __ret_609;
-  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
-  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
-  return __ret_609;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
-  uint16x8_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_610;
-  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
-  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
-  return __ret_610;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
-  uint32x4_t __ret_611;
-  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
-  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
-  return __ret_611;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
-  int16x8_t __ret_612;
-  int8x8_t __a1_612 = vget_high_s8(__p0_612);
-  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
-  return __ret_612;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
-  int8x16_t __rev0_613;  __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_613;
-  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
-  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
-  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_613;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
-  int16x8_t __ret_614;
-  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
-  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
-  return __ret_614;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
-  int64x2_t __ret_615;
-  int32x2_t __a1_615 = vget_high_s32(__p0_615);
-  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
-  return __ret_615;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
-  int32x4_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
-  int64x2_t __ret_616;
-  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
-  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0);
-  return __ret_616;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) {
-  int64x2_t __ret_617;
-  int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617);
-  __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0));
-  return __ret_617;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) {
-  int32x4_t __ret_618;
-  int16x4_t __a1_618 = vget_high_s16(__p0_618);
-  __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0));
-  return __ret_618;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) {
-  int16x8_t __rev0_619;  __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_619;
-  int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619);
-  __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0));
-  __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0);
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_619) {
+  uint16x8_t __ret_619;
+  uint8x8_t __a1_619 = vget_high_u8(__p0_619);
+  __ret_619 = (uint16x8_t)(vshll_n_u8(__a1_619, 0));
   return __ret_619;
 }
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) {
-  int32x4_t __ret_620;
-  int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620);
-  __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0));
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_620) {
+  uint8x16_t __rev0_620;  __rev0_620 = __builtin_shufflevector(__p0_620, __p0_620, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret_620;
+  uint8x8_t __a1_620 = __noswap_vget_high_u8(__rev0_620);
+  __ret_620 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_620, 0));
+  __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret_620;
 }
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_621) {
+  uint16x8_t __ret_621;
+  uint8x8_t __a1_621 = __noswap_vget_high_u8(__p0_621);
+  __ret_621 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_621, 0));
+  return __ret_621;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_622) {
+  uint64x2_t __ret_622;
+  uint32x2_t __a1_622 = vget_high_u32(__p0_622);
+  __ret_622 = (uint64x2_t)(vshll_n_u32(__a1_622, 0));
+  return __ret_622;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_623) {
+  uint32x4_t __rev0_623;  __rev0_623 = __builtin_shufflevector(__p0_623, __p0_623, 3, 2, 1, 0);
+  uint64x2_t __ret_623;
+  uint32x2_t __a1_623 = __noswap_vget_high_u32(__rev0_623);
+  __ret_623 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_623, 0));
+  __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0);
+  return __ret_623;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_624) {
+  uint64x2_t __ret_624;
+  uint32x2_t __a1_624 = __noswap_vget_high_u32(__p0_624);
+  __ret_624 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_624, 0));
+  return __ret_624;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_625) {
+  uint32x4_t __ret_625;
+  uint16x4_t __a1_625 = vget_high_u16(__p0_625);
+  __ret_625 = (uint32x4_t)(vshll_n_u16(__a1_625, 0));
+  return __ret_625;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_626) {
+  uint16x8_t __rev0_626;  __rev0_626 = __builtin_shufflevector(__p0_626, __p0_626, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret_626;
+  uint16x4_t __a1_626 = __noswap_vget_high_u16(__rev0_626);
+  __ret_626 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_626, 0));
+  __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0);
+  return __ret_626;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_627) {
+  uint32x4_t __ret_627;
+  uint16x4_t __a1_627 = __noswap_vget_high_u16(__p0_627);
+  __ret_627 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_627, 0));
+  return __ret_627;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_628) {
+  int16x8_t __ret_628;
+  int8x8_t __a1_628 = vget_high_s8(__p0_628);
+  __ret_628 = (int16x8_t)(vshll_n_s8(__a1_628, 0));
+  return __ret_628;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_629) {
+  int8x16_t __rev0_629;  __rev0_629 = __builtin_shufflevector(__p0_629, __p0_629, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret_629;
+  int8x8_t __a1_629 = __noswap_vget_high_s8(__rev0_629);
+  __ret_629 = (int16x8_t)(__noswap_vshll_n_s8(__a1_629, 0));
+  __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_629;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_630) {
+  int16x8_t __ret_630;
+  int8x8_t __a1_630 = __noswap_vget_high_s8(__p0_630);
+  __ret_630 = (int16x8_t)(__noswap_vshll_n_s8(__a1_630, 0));
+  return __ret_630;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_631) {
+  int64x2_t __ret_631;
+  int32x2_t __a1_631 = vget_high_s32(__p0_631);
+  __ret_631 = (int64x2_t)(vshll_n_s32(__a1_631, 0));
+  return __ret_631;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_632) {
+  int32x4_t __rev0_632;  __rev0_632 = __builtin_shufflevector(__p0_632, __p0_632, 3, 2, 1, 0);
+  int64x2_t __ret_632;
+  int32x2_t __a1_632 = __noswap_vget_high_s32(__rev0_632);
+  __ret_632 = (int64x2_t)(__noswap_vshll_n_s32(__a1_632, 0));
+  __ret_632 = __builtin_shufflevector(__ret_632, __ret_632, 1, 0);
+  return __ret_632;
+}
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_633) {
+  int64x2_t __ret_633;
+  int32x2_t __a1_633 = __noswap_vget_high_s32(__p0_633);
+  __ret_633 = (int64x2_t)(__noswap_vshll_n_s32(__a1_633, 0));
+  return __ret_633;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_634) {
+  int32x4_t __ret_634;
+  int16x4_t __a1_634 = vget_high_s16(__p0_634);
+  __ret_634 = (int32x4_t)(vshll_n_s16(__a1_634, 0));
+  return __ret_634;
+}
+#else
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_635) {
+  int16x8_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__p0_635, __p0_635, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret_635;
+  int16x4_t __a1_635 = __noswap_vget_high_s16(__rev0_635);
+  __ret_635 = (int32x4_t)(__noswap_vshll_n_s16(__a1_635, 0));
+  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0);
+  return __ret_635;
+}
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_636) {
+  int32x4_t __ret_636;
+  int16x4_t __a1_636 = __noswap_vget_high_s16(__p0_636);
+  __ret_636 = (int32x4_t)(__noswap_vshll_n_s16(__a1_636, 0));
+  return __ret_636;
+}
 #endif
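
The vmovl_high_* widening moves above are built from two primitives:
vget_high_* selects the upper half and vshll_n_* with a shift of 0 widens
each lane; the big-endian branch also defines a __noswap_* twin so helpers
that already hold reversed vectors can compose without double-swapping. A
sketch of the composition (the wrapper name is hypothetical):

  #include <arm_neon.h>

  /* vmovl_high_u8 composition: take the upper eight lanes, then widen
   * u8 -> u16 via a zero-bit shift-left-long. */
  static inline uint16x8_t movl_high_u8_sketch(uint8x16_t v) {
    uint8x8_t hi = vget_high_u8(v);  /* lanes 8..15 */
    return vshll_n_u8(hi, 0);        /* widen, shift by 0 */
  }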
 
 #ifdef __LITTLE_ENDIAN__
@@ -56550,29 +56738,29 @@
   __ret = __p0 * __p1;
   return __ret;
 }
-#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \
-  float64_t __s0_621 = __p0_621; \
-  float64x1_t __s1_621 = __p1_621; \
-  float64_t __ret_621; \
-  __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \
-  __ret_621; \
+#define vmuld_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  float64_t __s0_637 = __p0_637; \
+  float64x1_t __s1_637 = __p1_637; \
+  float64_t __ret_637; \
+  __ret_637 = __s0_637 * vget_lane_f64(__s1_637, __p2_637); \
+  __ret_637; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
-  float32_t __s0_622 = __p0_622; \
-  float32x2_t __s1_622 = __p1_622; \
-  float32_t __ret_622; \
-  __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \
-  __ret_622; \
+#define vmuls_lane_f32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  float32_t __s0_638 = __p0_638; \
+  float32x2_t __s1_638 = __p1_638; \
+  float32_t __ret_638; \
+  __ret_638 = __s0_638 * vget_lane_f32(__s1_638, __p2_638); \
+  __ret_638; \
 })
 #else
-#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \
-  float32_t __s0_623 = __p0_623; \
-  float32x2_t __s1_623 = __p1_623; \
-  float32x2_t __rev1_623;  __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \
-  float32_t __ret_623; \
-  __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \
-  __ret_623; \
+#define vmuls_lane_f32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  float32_t __s0_639 = __p0_639; \
+  float32x2_t __s1_639 = __p1_639; \
+  float32x2_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \
+  float32_t __ret_639; \
+  __ret_639 = __s0_639 * __noswap_vget_lane_f32(__rev1_639, __p2_639); \
+  __ret_639; \
 })
 #endif
 
@@ -56584,60 +56772,60 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \
-  float64x2_t __s0_624 = __p0_624; \
-  float64x1_t __s1_624 = __p1_624; \
-  float64x2_t __ret_624; \
-  __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \
-  __ret_624; \
+#define vmulq_lane_f64(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  float64x2_t __s0_640 = __p0_640; \
+  float64x1_t __s1_640 = __p1_640; \
+  float64x2_t __ret_640; \
+  __ret_640 = __s0_640 * splatq_lane_f64(__s1_640, __p2_640); \
+  __ret_640; \
 })
 #else
-#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
-  float64x2_t __s0_625 = __p0_625; \
-  float64x1_t __s1_625 = __p1_625; \
-  float64x2_t __rev0_625;  __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 1, 0); \
-  float64x2_t __ret_625; \
-  __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \
-  __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \
-  __ret_625; \
+#define vmulq_lane_f64(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  float64x2_t __s0_641 = __p0_641; \
+  float64x1_t __s1_641 = __p1_641; \
+  float64x2_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \
+  float64x2_t __ret_641; \
+  __ret_641 = __rev0_641 * __noswap_splatq_lane_f64(__s1_641, __p2_641); \
+  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \
+  __ret_641; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \
-  float64_t __s0_626 = __p0_626; \
-  float64x2_t __s1_626 = __p1_626; \
-  float64_t __ret_626; \
-  __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \
-  __ret_626; \
+#define vmuld_laneq_f64(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  float64_t __s0_642 = __p0_642; \
+  float64x2_t __s1_642 = __p1_642; \
+  float64_t __ret_642; \
+  __ret_642 = __s0_642 * vgetq_lane_f64(__s1_642, __p2_642); \
+  __ret_642; \
 })
 #else
-#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  float64_t __s0_627 = __p0_627; \
-  float64x2_t __s1_627 = __p1_627; \
-  float64x2_t __rev1_627;  __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \
-  float64_t __ret_627; \
-  __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \
-  __ret_627; \
+#define vmuld_laneq_f64(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  float64_t __s0_643 = __p0_643; \
+  float64x2_t __s1_643 = __p1_643; \
+  float64x2_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 1, 0); \
+  float64_t __ret_643; \
+  __ret_643 = __s0_643 * __noswap_vgetq_lane_f64(__rev1_643, __p2_643); \
+  __ret_643; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  float32_t __s0_628 = __p0_628; \
-  float32x4_t __s1_628 = __p1_628; \
-  float32_t __ret_628; \
-  __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \
-  __ret_628; \
+#define vmuls_laneq_f32(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  float32_t __s0_644 = __p0_644; \
+  float32x4_t __s1_644 = __p1_644; \
+  float32_t __ret_644; \
+  __ret_644 = __s0_644 * vgetq_lane_f32(__s1_644, __p2_644); \
+  __ret_644; \
 })
 #else
-#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  float32_t __s0_629 = __p0_629; \
-  float32x4_t __s1_629 = __p1_629; \
-  float32x4_t __rev1_629;  __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \
-  float32_t __ret_629; \
-  __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \
-  __ret_629; \
+#define vmuls_laneq_f32(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  float32_t __s0_645 = __p0_645; \
+  float32x4_t __s1_645 = __p1_645; \
+  float32x4_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \
+  float32_t __ret_645; \
+  __ret_645 = __s0_645 * __noswap_vgetq_lane_f32(__rev1_645, __p2_645); \
+  __ret_645; \
 })
 #endif
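
In the scalar-by-lane forms above (vmuld_lane_f64, vmuls_lane_f32 and
their laneq variants), only the vector operand has a lane order, so the
big-endian variants reverse just that operand before extracting the lane;
the scalar result needs no final swap. Expanded by hand (the wrapper name
and constant lane are illustrative):

  #include <arm_neon.h>

  /* Little-endian expansion of vmuls_lane_f32(s, v, 1): multiply the
   * scalar by one extracted lane of the vector. */
  static inline float32_t muls_lane1_sketch(float32_t s, float32x2_t v) {
    return s * vget_lane_f32(v, 1);
  }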
 
@@ -56661,236 +56849,236 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  uint32x4_t __s0_630 = __p0_630; \
-  uint32x4_t __s1_630 = __p1_630; \
-  uint32x4_t __ret_630; \
-  __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \
-  __ret_630; \
-})
-#else
-#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  uint32x4_t __s0_631 = __p0_631; \
-  uint32x4_t __s1_631 = __p1_631; \
-  uint32x4_t __rev0_631;  __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \
-  uint32x4_t __rev1_631;  __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \
-  uint32x4_t __ret_631; \
-  __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \
-  __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \
-  __ret_631; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  uint16x8_t __s0_632 = __p0_632; \
-  uint16x8_t __s1_632 = __p1_632; \
-  uint16x8_t __ret_632; \
-  __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \
-  __ret_632; \
-})
-#else
-#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  uint16x8_t __s0_633 = __p0_633; \
-  uint16x8_t __s1_633 = __p1_633; \
-  uint16x8_t __rev0_633;  __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_633;  __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_633; \
-  __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \
-  __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_633; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  float64x2_t __s0_634 = __p0_634; \
-  float64x2_t __s1_634 = __p1_634; \
-  float64x2_t __ret_634; \
-  __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \
-  __ret_634; \
-})
-#else
-#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  float64x2_t __s0_635 = __p0_635; \
-  float64x2_t __s1_635 = __p1_635; \
-  float64x2_t __rev0_635;  __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \
-  float64x2_t __rev1_635;  __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \
-  float64x2_t __ret_635; \
-  __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \
-  __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \
-  __ret_635; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  float32x4_t __s0_636 = __p0_636; \
-  float32x4_t __s1_636 = __p1_636; \
-  float32x4_t __ret_636; \
-  __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \
-  __ret_636; \
-})
-#else
-#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  float32x4_t __s0_637 = __p0_637; \
-  float32x4_t __s1_637 = __p1_637; \
-  float32x4_t __rev0_637;  __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \
-  float32x4_t __rev1_637;  __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \
-  float32x4_t __ret_637; \
-  __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \
-  __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \
-  __ret_637; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int32x4_t __s0_638 = __p0_638; \
-  int32x4_t __s1_638 = __p1_638; \
-  int32x4_t __ret_638; \
-  __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \
-  __ret_638; \
-})
-#else
-#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int32x4_t __rev0_639;  __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \
-  int32x4_t __rev1_639;  __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \
-  int32x4_t __ret_639; \
-  __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \
-  __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \
-  __ret_639; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int16x8_t __s0_640 = __p0_640; \
-  int16x8_t __s1_640 = __p1_640; \
-  int16x8_t __ret_640; \
-  __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \
-  __ret_640; \
-})
-#else
-#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int16x8_t __rev0_641;  __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_641;  __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_641; \
-  __ret_641 = __rev0_641 * __noswap_splatq_laneq_s16(__rev1_641, __p2_641); \
-  __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_641; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  uint32x2_t __s0_642 = __p0_642; \
-  uint32x4_t __s1_642 = __p1_642; \
-  uint32x2_t __ret_642; \
-  __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \
-  __ret_642; \
-})
-#else
-#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  uint32x2_t __s0_643 = __p0_643; \
-  uint32x4_t __s1_643 = __p1_643; \
-  uint32x2_t __rev0_643;  __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \
-  uint32x4_t __rev1_643;  __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \
-  uint32x2_t __ret_643; \
-  __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \
-  __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \
-  __ret_643; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  uint16x4_t __s0_644 = __p0_644; \
-  uint16x8_t __s1_644 = __p1_644; \
-  uint16x4_t __ret_644; \
-  __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \
-  __ret_644; \
-})
-#else
-#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  uint16x4_t __s0_645 = __p0_645; \
-  uint16x8_t __s1_645 = __p1_645; \
-  uint16x4_t __rev0_645;  __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \
-  uint16x8_t __rev1_645;  __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_645; \
-  __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \
-  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \
-  __ret_645; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  float32x2_t __s0_646 = __p0_646; \
-  float32x4_t __s1_646 = __p1_646; \
-  float32x2_t __ret_646; \
-  __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \
+#define vmulq_laneq_u32(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  uint32x4_t __s0_646 = __p0_646; \
+  uint32x4_t __s1_646 = __p1_646; \
+  uint32x4_t __ret_646; \
+  __ret_646 = __s0_646 * splatq_laneq_u32(__s1_646, __p2_646); \
   __ret_646; \
 })
 #else
-#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  float32x2_t __s0_647 = __p0_647; \
-  float32x4_t __s1_647 = __p1_647; \
-  float32x2_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \
-  float32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
-  float32x2_t __ret_647; \
-  __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \
-  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \
+#define vmulq_laneq_u32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  uint32x4_t __s0_647 = __p0_647; \
+  uint32x4_t __s1_647 = __p1_647; \
+  uint32x4_t __rev0_647;  __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \
+  uint32x4_t __rev1_647;  __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \
+  uint32x4_t __ret_647; \
+  __ret_647 = __rev0_647 * __noswap_splatq_laneq_u32(__rev1_647, __p2_647); \
+  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \
   __ret_647; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32x2_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x2_t __ret_648; \
-  __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \
+#define vmulq_laneq_u16(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  uint16x8_t __s0_648 = __p0_648; \
+  uint16x8_t __s1_648 = __p1_648; \
+  uint16x8_t __ret_648; \
+  __ret_648 = __s0_648 * splatq_laneq_u16(__s1_648, __p2_648); \
   __ret_648; \
 })
 #else
-#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int32x2_t __s0_649 = __p0_649; \
-  int32x4_t __s1_649 = __p1_649; \
-  int32x2_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \
-  int32x4_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \
-  int32x2_t __ret_649; \
-  __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \
-  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \
+#define vmulq_laneq_u16(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  uint16x8_t __s0_649 = __p0_649; \
+  uint16x8_t __s1_649 = __p1_649; \
+  uint16x8_t __rev0_649;  __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_649;  __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_649; \
+  __ret_649 = __rev0_649 * __noswap_splatq_laneq_u16(__rev1_649, __p2_649); \
+  __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret_649; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16x4_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x4_t __ret_650; \
-  __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \
+#define vmulq_laneq_f64(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  float64x2_t __s0_650 = __p0_650; \
+  float64x2_t __s1_650 = __p1_650; \
+  float64x2_t __ret_650; \
+  __ret_650 = __s0_650 * splatq_laneq_f64(__s1_650, __p2_650); \
   __ret_650; \
 })
 #else
-#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int16x4_t __s0_651 = __p0_651; \
-  int16x8_t __s1_651 = __p1_651; \
-  int16x4_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \
-  int16x8_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_651; \
-  __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \
-  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \
+#define vmulq_laneq_f64(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  float64x2_t __s0_651 = __p0_651; \
+  float64x2_t __s1_651 = __p1_651; \
+  float64x2_t __rev0_651;  __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 1, 0); \
+  float64x2_t __rev1_651;  __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 1, 0); \
+  float64x2_t __ret_651; \
+  __ret_651 = __rev0_651 * __noswap_splatq_laneq_f64(__rev1_651, __p2_651); \
+  __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 1, 0); \
   __ret_651; \
 })
 #endif
 
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_f32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  float32x4_t __s0_652 = __p0_652; \
+  float32x4_t __s1_652 = __p1_652; \
+  float32x4_t __ret_652; \
+  __ret_652 = __s0_652 * splatq_laneq_f32(__s1_652, __p2_652); \
+  __ret_652; \
+})
+#else
+#define vmulq_laneq_f32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  float32x4_t __s0_653 = __p0_653; \
+  float32x4_t __s1_653 = __p1_653; \
+  float32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
+  float32x4_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 3, 2, 1, 0); \
+  float32x4_t __ret_653; \
+  __ret_653 = __rev0_653 * __noswap_splatq_laneq_f32(__rev1_653, __p2_653); \
+  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 3, 2, 1, 0); \
+  __ret_653; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s32(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  int32x4_t __s0_654 = __p0_654; \
+  int32x4_t __s1_654 = __p1_654; \
+  int32x4_t __ret_654; \
+  __ret_654 = __s0_654 * splatq_laneq_s32(__s1_654, __p2_654); \
+  __ret_654; \
+})
+#else
+#define vmulq_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  int32x4_t __s0_655 = __p0_655; \
+  int32x4_t __s1_655 = __p1_655; \
+  int32x4_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 3, 2, 1, 0); \
+  int32x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
+  int32x4_t __ret_655; \
+  __ret_655 = __rev0_655 * __noswap_splatq_laneq_s32(__rev1_655, __p2_655); \
+  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
+  __ret_655; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_laneq_s16(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  int16x8_t __s0_656 = __p0_656; \
+  int16x8_t __s1_656 = __p1_656; \
+  int16x8_t __ret_656; \
+  __ret_656 = __s0_656 * splatq_laneq_s16(__s1_656, __p2_656); \
+  __ret_656; \
+})
+#else
+#define vmulq_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  int16x8_t __s0_657 = __p0_657; \
+  int16x8_t __s1_657 = __p1_657; \
+  int16x8_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_657; \
+  __ret_657 = __rev0_657 * __noswap_splatq_laneq_s16(__rev1_657, __p2_657); \
+  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_657; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u32(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  uint32x2_t __s0_658 = __p0_658; \
+  uint32x4_t __s1_658 = __p1_658; \
+  uint32x2_t __ret_658; \
+  __ret_658 = __s0_658 * splat_laneq_u32(__s1_658, __p2_658); \
+  __ret_658; \
+})
+#else
+#define vmul_laneq_u32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  uint32x2_t __s0_659 = __p0_659; \
+  uint32x4_t __s1_659 = __p1_659; \
+  uint32x2_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \
+  uint32x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
+  uint32x2_t __ret_659; \
+  __ret_659 = __rev0_659 * __noswap_splat_laneq_u32(__rev1_659, __p2_659); \
+  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \
+  __ret_659; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_u16(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  uint16x4_t __s0_660 = __p0_660; \
+  uint16x8_t __s1_660 = __p1_660; \
+  uint16x4_t __ret_660; \
+  __ret_660 = __s0_660 * splat_laneq_u16(__s1_660, __p2_660); \
+  __ret_660; \
+})
+#else
+#define vmul_laneq_u16(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  uint16x4_t __s0_661 = __p0_661; \
+  uint16x8_t __s1_661 = __p1_661; \
+  uint16x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
+  uint16x8_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_661; \
+  __ret_661 = __rev0_661 * __noswap_splat_laneq_u16(__rev1_661, __p2_661); \
+  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 3, 2, 1, 0); \
+  __ret_661; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_f32(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  float32x2_t __s0_662 = __p0_662; \
+  float32x4_t __s1_662 = __p1_662; \
+  float32x2_t __ret_662; \
+  __ret_662 = __s0_662 * splat_laneq_f32(__s1_662, __p2_662); \
+  __ret_662; \
+})
+#else
+#define vmul_laneq_f32(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  float32x2_t __s0_663 = __p0_663; \
+  float32x4_t __s1_663 = __p1_663; \
+  float32x2_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 1, 0); \
+  float32x4_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \
+  float32x2_t __ret_663; \
+  __ret_663 = __rev0_663 * __noswap_splat_laneq_f32(__rev1_663, __p2_663); \
+  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 1, 0); \
+  __ret_663; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  int32x2_t __s0_664 = __p0_664; \
+  int32x4_t __s1_664 = __p1_664; \
+  int32x2_t __ret_664; \
+  __ret_664 = __s0_664 * splat_laneq_s32(__s1_664, __p2_664); \
+  __ret_664; \
+})
+#else
+#define vmul_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  int32x2_t __s0_665 = __p0_665; \
+  int32x4_t __s1_665 = __p1_665; \
+  int32x2_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 1, 0); \
+  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
+  int32x2_t __ret_665; \
+  __ret_665 = __rev0_665 * __noswap_splat_laneq_s32(__rev1_665, __p2_665); \
+  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
+  __ret_665; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  int16x4_t __s0_666 = __p0_666; \
+  int16x8_t __s1_666 = __p1_666; \
+  int16x4_t __ret_666; \
+  __ret_666 = __s0_666 * splat_laneq_s16(__s1_666, __p2_666); \
+  __ret_666; \
+})
+#else
+#define vmul_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  int16x4_t __s0_667 = __p0_667; \
+  int16x8_t __s1_667 = __p1_667; \
+  int16x4_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 3, 2, 1, 0); \
+  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_667; \
+  __ret_667 = __rev0_667 * __noswap_splat_laneq_s16(__rev1_667, __p2_667); \
+  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
+  __ret_667; \
+})
+#endif
+
 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
   float64x1_t __ret;
   __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
@@ -57053,170 +57241,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  uint32x4_t __s0_652 = __p0_652; \
-  uint32x2_t __s1_652 = __p1_652; \
-  uint64x2_t __ret_652; \
-  __ret_652 = vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \
-  __ret_652; \
+#define vmull_high_lane_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  uint32x4_t __s0_668 = __p0_668; \
+  uint32x2_t __s1_668 = __p1_668; \
+  uint64x2_t __ret_668; \
+  __ret_668 = vmull_u32(vget_high_u32(__s0_668), splat_lane_u32(__s1_668, __p2_668)); \
+  __ret_668; \
 })
 #else
-#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  uint32x4_t __s0_653 = __p0_653; \
-  uint32x2_t __s1_653 = __p1_653; \
-  uint32x4_t __rev0_653;  __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \
-  uint32x2_t __rev1_653;  __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \
-  uint64x2_t __ret_653; \
-  __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \
-  __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \
-  __ret_653; \
+#define vmull_high_lane_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  uint32x4_t __s0_669 = __p0_669; \
+  uint32x2_t __s1_669 = __p1_669; \
+  uint32x4_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \
+  uint32x2_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \
+  uint64x2_t __ret_669; \
+  __ret_669 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_669), __noswap_splat_lane_u32(__rev1_669, __p2_669)); \
+  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
+  __ret_669; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  uint16x8_t __s0_654 = __p0_654; \
-  uint16x4_t __s1_654 = __p1_654; \
-  uint32x4_t __ret_654; \
-  __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \
-  __ret_654; \
+#define vmull_high_lane_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  uint16x8_t __s0_670 = __p0_670; \
+  uint16x4_t __s1_670 = __p1_670; \
+  uint32x4_t __ret_670; \
+  __ret_670 = vmull_u16(vget_high_u16(__s0_670), splat_lane_u16(__s1_670, __p2_670)); \
+  __ret_670; \
 })
 #else
-#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  uint16x8_t __s0_655 = __p0_655; \
-  uint16x4_t __s1_655 = __p1_655; \
-  uint16x8_t __rev0_655;  __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_655;  __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \
-  uint32x4_t __ret_655; \
-  __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \
-  __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \
-  __ret_655; \
+#define vmull_high_lane_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  uint16x8_t __s0_671 = __p0_671; \
+  uint16x4_t __s1_671 = __p1_671; \
+  uint16x8_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \
+  uint32x4_t __ret_671; \
+  __ret_671 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_671), __noswap_splat_lane_u16(__rev1_671, __p2_671)); \
+  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
+  __ret_671; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32x4_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int64x2_t __ret_656; \
-  __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \
-  __ret_656; \
+#define vmull_high_lane_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  int32x4_t __s0_672 = __p0_672; \
+  int32x2_t __s1_672 = __p1_672; \
+  int64x2_t __ret_672; \
+  __ret_672 = vmull_s32(vget_high_s32(__s0_672), splat_lane_s32(__s1_672, __p2_672)); \
+  __ret_672; \
 })
 #else
-#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int32x4_t __s0_657 = __p0_657; \
-  int32x2_t __s1_657 = __p1_657; \
-  int32x4_t __rev0_657;  __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \
-  int32x2_t __rev1_657;  __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \
-  int64x2_t __ret_657; \
-  __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \
-  __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \
-  __ret_657; \
+#define vmull_high_lane_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  int32x4_t __s0_673 = __p0_673; \
+  int32x2_t __s1_673 = __p1_673; \
+  int32x4_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \
+  int32x2_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 1, 0); \
+  int64x2_t __ret_673; \
+  __ret_673 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_673), __noswap_splat_lane_s32(__rev1_673, __p2_673)); \
+  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
+  __ret_673; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16x8_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int32x4_t __ret_658; \
-  __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \
-  __ret_658; \
+#define vmull_high_lane_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  int16x8_t __s0_674 = __p0_674; \
+  int16x4_t __s1_674 = __p1_674; \
+  int32x4_t __ret_674; \
+  __ret_674 = vmull_s16(vget_high_s16(__s0_674), splat_lane_s16(__s1_674, __p2_674)); \
+  __ret_674; \
 })
 #else
-#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int16x8_t __s0_659 = __p0_659; \
-  int16x4_t __s1_659 = __p1_659; \
-  int16x8_t __rev0_659;  __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_659;  __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \
-  int32x4_t __ret_659; \
-  __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \
-  __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \
-  __ret_659; \
+#define vmull_high_lane_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  int16x8_t __s0_675 = __p0_675; \
+  int16x4_t __s1_675 = __p1_675; \
+  int16x8_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 3, 2, 1, 0); \
+  int32x4_t __ret_675; \
+  __ret_675 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_675), __noswap_splat_lane_s16(__rev1_675, __p2_675)); \
+  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
+  __ret_675; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  uint32x4_t __s0_660 = __p0_660; \
-  uint32x4_t __s1_660 = __p1_660; \
-  uint64x2_t __ret_660; \
-  __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \
-  __ret_660; \
+#define vmull_high_laneq_u32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  uint32x4_t __s0_676 = __p0_676; \
+  uint32x4_t __s1_676 = __p1_676; \
+  uint64x2_t __ret_676; \
+  __ret_676 = vmull_u32(vget_high_u32(__s0_676), splat_laneq_u32(__s1_676, __p2_676)); \
+  __ret_676; \
 })
 #else
-#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  uint32x4_t __s0_661 = __p0_661; \
-  uint32x4_t __s1_661 = __p1_661; \
-  uint32x4_t __rev0_661;  __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \
-  uint32x4_t __rev1_661;  __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \
-  uint64x2_t __ret_661; \
-  __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \
-  __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \
-  __ret_661; \
+#define vmull_high_laneq_u32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  uint32x4_t __s0_677 = __p0_677; \
+  uint32x4_t __s1_677 = __p1_677; \
+  uint32x4_t __rev0_677;  __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \
+  uint32x4_t __rev1_677;  __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \
+  uint64x2_t __ret_677; \
+  __ret_677 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_677), __noswap_splat_laneq_u32(__rev1_677, __p2_677)); \
+  __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \
+  __ret_677; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  uint16x8_t __s0_662 = __p0_662; \
-  uint16x8_t __s1_662 = __p1_662; \
-  uint32x4_t __ret_662; \
-  __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \
-  __ret_662; \
+#define vmull_high_laneq_u16(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  uint16x8_t __s0_678 = __p0_678; \
+  uint16x8_t __s1_678 = __p1_678; \
+  uint32x4_t __ret_678; \
+  __ret_678 = vmull_u16(vget_high_u16(__s0_678), splat_laneq_u16(__s1_678, __p2_678)); \
+  __ret_678; \
 })
 #else
-#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x8_t __s0_663 = __p0_663; \
-  uint16x8_t __s1_663 = __p1_663; \
-  uint16x8_t __rev0_663;  __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_663;  __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_663; \
-  __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \
-  __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \
-  __ret_663; \
+#define vmull_high_laneq_u16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  uint16x8_t __s0_679 = __p0_679; \
+  uint16x8_t __s1_679 = __p1_679; \
+  uint16x8_t __rev0_679;  __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_679;  __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_679; \
+  __ret_679 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_679), __noswap_splat_laneq_u16(__rev1_679, __p2_679)); \
+  __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \
+  __ret_679; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  int32x4_t __s0_664 = __p0_664; \
-  int32x4_t __s1_664 = __p1_664; \
-  int64x2_t __ret_664; \
-  __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \
-  __ret_664; \
+#define vmull_high_laneq_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  int32x4_t __s0_680 = __p0_680; \
+  int32x4_t __s1_680 = __p1_680; \
+  int64x2_t __ret_680; \
+  __ret_680 = vmull_s32(vget_high_s32(__s0_680), splat_laneq_s32(__s1_680, __p2_680)); \
+  __ret_680; \
 })
 #else
-#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  int32x4_t __s0_665 = __p0_665; \
-  int32x4_t __s1_665 = __p1_665; \
-  int32x4_t __rev0_665;  __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \
-  int32x4_t __rev1_665;  __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \
-  int64x2_t __ret_665; \
-  __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \
-  __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \
-  __ret_665; \
+#define vmull_high_laneq_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  int32x4_t __s0_681 = __p0_681; \
+  int32x4_t __s1_681 = __p1_681; \
+  int32x4_t __rev0_681;  __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 3, 2, 1, 0); \
+  int32x4_t __rev1_681;  __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \
+  int64x2_t __ret_681; \
+  __ret_681 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_681), __noswap_splat_laneq_s32(__rev1_681, __p2_681)); \
+  __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \
+  __ret_681; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  int16x8_t __s0_666 = __p0_666; \
-  int16x8_t __s1_666 = __p1_666; \
-  int32x4_t __ret_666; \
-  __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \
-  __ret_666; \
+#define vmull_high_laneq_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  int16x8_t __s0_682 = __p0_682; \
+  int16x8_t __s1_682 = __p1_682; \
+  int32x4_t __ret_682; \
+  __ret_682 = vmull_s16(vget_high_s16(__s0_682), splat_laneq_s16(__s1_682, __p2_682)); \
+  __ret_682; \
 })
 #else
-#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  int16x8_t __s0_667 = __p0_667; \
-  int16x8_t __s1_667 = __p1_667; \
-  int16x8_t __rev0_667;  __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_667;  __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_667; \
-  __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \
-  __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \
-  __ret_667; \
+#define vmull_high_laneq_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  int16x8_t __s0_683 = __p0_683; \
+  int16x8_t __s1_683 = __p1_683; \
+  int16x8_t __rev0_683;  __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_683;  __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_683; \
+  __ret_683 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_683), __noswap_splat_laneq_s16(__rev1_683, __p2_683)); \
+  __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \
+  __ret_683; \
 })
 #endif
 
@@ -57285,86 +57473,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint32x2_t __s0_668 = __p0_668; \
-  uint32x4_t __s1_668 = __p1_668; \
-  uint64x2_t __ret_668; \
-  __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \
-  __ret_668; \
+#define vmull_laneq_u32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  uint32x2_t __s0_684 = __p0_684; \
+  uint32x4_t __s1_684 = __p1_684; \
+  uint64x2_t __ret_684; \
+  __ret_684 = vmull_u32(__s0_684, splat_laneq_u32(__s1_684, __p2_684)); \
+  __ret_684; \
 })
 #else
-#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  uint32x2_t __s0_669 = __p0_669; \
-  uint32x4_t __s1_669 = __p1_669; \
-  uint32x2_t __rev0_669;  __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \
-  uint32x4_t __rev1_669;  __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \
-  uint64x2_t __ret_669; \
-  __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \
-  __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \
-  __ret_669; \
+#define vmull_laneq_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  uint32x2_t __s0_685 = __p0_685; \
+  uint32x4_t __s1_685 = __p1_685; \
+  uint32x2_t __rev0_685;  __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \
+  uint32x4_t __rev1_685;  __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \
+  uint64x2_t __ret_685; \
+  __ret_685 = __noswap_vmull_u32(__rev0_685, __noswap_splat_laneq_u32(__rev1_685, __p2_685)); \
+  __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \
+  __ret_685; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  uint16x4_t __s0_670 = __p0_670; \
-  uint16x8_t __s1_670 = __p1_670; \
-  uint32x4_t __ret_670; \
-  __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \
-  __ret_670; \
+#define vmull_laneq_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  uint16x4_t __s0_686 = __p0_686; \
+  uint16x8_t __s1_686 = __p1_686; \
+  uint32x4_t __ret_686; \
+  __ret_686 = vmull_u16(__s0_686, splat_laneq_u16(__s1_686, __p2_686)); \
+  __ret_686; \
 })
 #else
-#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  uint16x4_t __s0_671 = __p0_671; \
-  uint16x8_t __s1_671 = __p1_671; \
-  uint16x4_t __rev0_671;  __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \
-  uint16x8_t __rev1_671;  __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_671; \
-  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
-  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
-  __ret_671; \
+#define vmull_laneq_u16(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  uint16x4_t __s0_687 = __p0_687; \
+  uint16x8_t __s1_687 = __p1_687; \
+  uint16x4_t __rev0_687;  __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \
+  uint16x8_t __rev1_687;  __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_687; \
+  __ret_687 = __noswap_vmull_u16(__rev0_687, __noswap_splat_laneq_u16(__rev1_687, __p2_687)); \
+  __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \
+  __ret_687; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int32x4_t __s1_672 = __p1_672; \
-  int64x2_t __ret_672; \
-  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
-  __ret_672; \
+#define vmull_laneq_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  int32x2_t __s0_688 = __p0_688; \
+  int32x4_t __s1_688 = __p1_688; \
+  int64x2_t __ret_688; \
+  __ret_688 = vmull_s32(__s0_688, splat_laneq_s32(__s1_688, __p2_688)); \
+  __ret_688; \
 })
 #else
-#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int32x2_t __s0_673 = __p0_673; \
-  int32x4_t __s1_673 = __p1_673; \
-  int32x2_t __rev0_673;  __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
-  int32x4_t __rev1_673;  __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
-  int64x2_t __ret_673; \
-  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
-  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
-  __ret_673; \
+#define vmull_laneq_s32(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  int32x2_t __s0_689 = __p0_689; \
+  int32x4_t __s1_689 = __p1_689; \
+  int32x2_t __rev0_689;  __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 1, 0); \
+  int32x4_t __rev1_689;  __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \
+  int64x2_t __ret_689; \
+  __ret_689 = __noswap_vmull_s32(__rev0_689, __noswap_splat_laneq_s32(__rev1_689, __p2_689)); \
+  __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 1, 0); \
+  __ret_689; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int16x4_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int32x4_t __ret_674; \
-  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
-  __ret_674; \
+#define vmull_laneq_s16(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  int16x4_t __s0_690 = __p0_690; \
+  int16x8_t __s1_690 = __p1_690; \
+  int32x4_t __ret_690; \
+  __ret_690 = vmull_s16(__s0_690, splat_laneq_s16(__s1_690, __p2_690)); \
+  __ret_690; \
 })
 #else
-#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int16x8_t __s1_675 = __p1_675; \
-  int16x4_t __rev0_675;  __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
-  int16x8_t __rev1_675;  __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_675; \
-  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
-  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
-  __ret_675; \
+#define vmull_laneq_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  int16x4_t __s0_691 = __p0_691; \
+  int16x8_t __s1_691 = __p1_691; \
+  int16x4_t __rev0_691;  __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 3, 2, 1, 0); \
+  int16x8_t __rev1_691;  __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_691; \
+  __ret_691 = __noswap_vmull_s16(__rev0_691, __noswap_splat_laneq_s16(__rev1_691, __p2_691)); \
+  __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \
+  __ret_691; \
 })
 #endif
 
@@ -57449,196 +57637,196 @@
   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
   return __ret;
 }
-#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  float64_t __s0_676 = __p0_676; \
-  float64x1_t __s1_676 = __p1_676; \
-  float64_t __ret_676; \
-  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
-  __ret_676; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  float32_t __s0_677 = __p0_677; \
-  float32x2_t __s1_677 = __p1_677; \
-  float32_t __ret_677; \
-  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
-  __ret_677; \
-})
-#else
-#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  float32_t __s0_678 = __p0_678; \
-  float32x2_t __s1_678 = __p1_678; \
-  float32x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  float32_t __ret_678; \
-  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  float64x2_t __s0_679 = __p0_679; \
-  float64x1_t __s1_679 = __p1_679; \
-  float64x2_t __ret_679; \
-  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
-  __ret_679; \
-})
-#else
-#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  float64x2_t __s0_680 = __p0_680; \
-  float64x1_t __s1_680 = __p1_680; \
-  float64x2_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
-  float64x2_t __ret_680; \
-  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  float32x4_t __s0_681 = __p0_681; \
-  float32x2_t __s1_681 = __p1_681; \
-  float32x4_t __ret_681; \
-  __ret_681 = vmulxq_f32(__s0_681, splatq_lane_f32(__s1_681, __p2_681)); \
-  __ret_681; \
-})
-#else
-#define vmulxq_lane_f32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  float32x4_t __s0_682 = __p0_682; \
-  float32x2_t __s1_682 = __p1_682; \
-  float32x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  float32x2_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \
-  float32x4_t __ret_682; \
-  __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  float32x2_t __s0_683 = __p0_683; \
-  float32x2_t __s1_683 = __p1_683; \
-  float32x2_t __ret_683; \
-  __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \
-  __ret_683; \
-})
-#else
-#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  float32x2_t __s0_684 = __p0_684; \
-  float32x2_t __s1_684 = __p1_684; \
-  float32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  float32x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  float32x2_t __ret_684; \
-  __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  float64_t __s0_685 = __p0_685; \
-  float64x2_t __s1_685 = __p1_685; \
-  float64_t __ret_685; \
-  __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \
-  __ret_685; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  float64_t __s0_686 = __p0_686; \
-  float64x2_t __s1_686 = __p1_686; \
-  float64x2_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \
-  float64_t __ret_686; \
-  __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  float32_t __s0_687 = __p0_687; \
-  float32x4_t __s1_687 = __p1_687; \
-  float32_t __ret_687; \
-  __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \
-  __ret_687; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  float32_t __s0_688 = __p0_688; \
-  float32x4_t __s1_688 = __p1_688; \
-  float32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  float32_t __ret_688; \
-  __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  float64x2_t __s0_689 = __p0_689; \
-  float64x2_t __s1_689 = __p1_689; \
-  float64x2_t __ret_689; \
-  __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \
-  __ret_689; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  float64x2_t __s0_690 = __p0_690; \
-  float64x2_t __s1_690 = __p1_690; \
-  float64x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  float64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  float64x2_t __ret_690; \
-  __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  float32x4_t __s0_691 = __p0_691; \
-  float32x4_t __s1_691 = __p1_691; \
-  float32x4_t __ret_691; \
-  __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
-  __ret_691; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  float32x4_t __s0_692 = __p0_692; \
-  float32x4_t __s1_692 = __p1_692; \
-  float32x4_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
-  float32x4_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
-  float32x4_t __ret_692; \
-  __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
+#define vmulxd_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  float64_t __s0_692 = __p0_692; \
+  float64x1_t __s1_692 = __p1_692; \
+  float64_t __ret_692; \
+  __ret_692 = vmulxd_f64(__s0_692, vget_lane_f64(__s1_692, __p2_692)); \
   __ret_692; \
 })
-#endif
-
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  float32x2_t __s0_693 = __p0_693; \
-  float32x4_t __s1_693 = __p1_693; \
-  float32x2_t __ret_693; \
-  __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
+#define vmulxs_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  float32_t __s0_693 = __p0_693; \
+  float32x2_t __s1_693 = __p1_693; \
+  float32_t __ret_693; \
+  __ret_693 = vmulxs_f32(__s0_693, vget_lane_f32(__s1_693, __p2_693)); \
   __ret_693; \
 })
 #else
-#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  float32x2_t __s0_694 = __p0_694; \
-  float32x4_t __s1_694 = __p1_694; \
-  float32x2_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
-  float32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  float32x2_t __ret_694; \
-  __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
+#define vmulxs_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  float32_t __s0_694 = __p0_694; \
+  float32x2_t __s1_694 = __p1_694; \
+  float32x2_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \
+  float32_t __ret_694; \
+  __ret_694 = vmulxs_f32(__s0_694, __noswap_vget_lane_f32(__rev1_694, __p2_694)); \
   __ret_694; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  float64x2_t __s0_695 = __p0_695; \
+  float64x1_t __s1_695 = __p1_695; \
+  float64x2_t __ret_695; \
+  __ret_695 = vmulxq_f64(__s0_695, splatq_lane_f64(__s1_695, __p2_695)); \
+  __ret_695; \
+})
+#else
+#define vmulxq_lane_f64(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  float64x2_t __s0_696 = __p0_696; \
+  float64x1_t __s1_696 = __p1_696; \
+  float64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  float64x2_t __ret_696; \
+  __ret_696 = __noswap_vmulxq_f64(__rev0_696, __noswap_splatq_lane_f64(__s1_696, __p2_696)); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
+  __ret_696; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f32(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  float32x4_t __s0_697 = __p0_697; \
+  float32x2_t __s1_697 = __p1_697; \
+  float32x4_t __ret_697; \
+  __ret_697 = vmulxq_f32(__s0_697, splatq_lane_f32(__s1_697, __p2_697)); \
+  __ret_697; \
+})
+#else
+#define vmulxq_lane_f32(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  float32x4_t __s0_698 = __p0_698; \
+  float32x2_t __s1_698 = __p1_698; \
+  float32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
+  float32x2_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \
+  float32x4_t __ret_698; \
+  __ret_698 = __noswap_vmulxq_f32(__rev0_698, __noswap_splatq_lane_f32(__rev1_698, __p2_698)); \
+  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
+  __ret_698; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_lane_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  float32x2_t __s0_699 = __p0_699; \
+  float32x2_t __s1_699 = __p1_699; \
+  float32x2_t __ret_699; \
+  __ret_699 = vmulx_f32(__s0_699, splat_lane_f32(__s1_699, __p2_699)); \
+  __ret_699; \
+})
+#else
+#define vmulx_lane_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  float32x2_t __s0_700 = __p0_700; \
+  float32x2_t __s1_700 = __p1_700; \
+  float32x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
+  float32x2_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \
+  float32x2_t __ret_700; \
+  __ret_700 = __noswap_vmulx_f32(__rev0_700, __noswap_splat_lane_f32(__rev1_700, __p2_700)); \
+  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
+  __ret_700; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxd_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  float64_t __s0_701 = __p0_701; \
+  float64x2_t __s1_701 = __p1_701; \
+  float64_t __ret_701; \
+  __ret_701 = vmulxd_f64(__s0_701, vgetq_lane_f64(__s1_701, __p2_701)); \
+  __ret_701; \
+})
+#else
+#define vmulxd_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  float64_t __s0_702 = __p0_702; \
+  float64x2_t __s1_702 = __p1_702; \
+  float64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  float64_t __ret_702; \
+  __ret_702 = vmulxd_f64(__s0_702, __noswap_vgetq_lane_f64(__rev1_702, __p2_702)); \
+  __ret_702; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  float32_t __s0_703 = __p0_703; \
+  float32x4_t __s1_703 = __p1_703; \
+  float32_t __ret_703; \
+  __ret_703 = vmulxs_f32(__s0_703, vgetq_lane_f32(__s1_703, __p2_703)); \
+  __ret_703; \
+})
+#else
+#define vmulxs_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  float32_t __s0_704 = __p0_704; \
+  float32x4_t __s1_704 = __p1_704; \
+  float32x4_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \
+  float32_t __ret_704; \
+  __ret_704 = vmulxs_f32(__s0_704, __noswap_vgetq_lane_f32(__rev1_704, __p2_704)); \
+  __ret_704; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f64(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  float64x2_t __s0_705 = __p0_705; \
+  float64x2_t __s1_705 = __p1_705; \
+  float64x2_t __ret_705; \
+  __ret_705 = vmulxq_f64(__s0_705, splatq_laneq_f64(__s1_705, __p2_705)); \
+  __ret_705; \
+})
+#else
+#define vmulxq_laneq_f64(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  float64x2_t __s0_706 = __p0_706; \
+  float64x2_t __s1_706 = __p1_706; \
+  float64x2_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \
+  float64x2_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \
+  float64x2_t __ret_706; \
+  __ret_706 = __noswap_vmulxq_f64(__rev0_706, __noswap_splatq_laneq_f64(__rev1_706, __p2_706)); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \
+  __ret_706; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_laneq_f32(__p0_707, __p1_707, __p2_707) __extension__ ({ \
+  float32x4_t __s0_707 = __p0_707; \
+  float32x4_t __s1_707 = __p1_707; \
+  float32x4_t __ret_707; \
+  __ret_707 = vmulxq_f32(__s0_707, splatq_laneq_f32(__s1_707, __p2_707)); \
+  __ret_707; \
+})
+#else
+#define vmulxq_laneq_f32(__p0_708, __p1_708, __p2_708) __extension__ ({ \
+  float32x4_t __s0_708 = __p0_708; \
+  float32x4_t __s1_708 = __p1_708; \
+  float32x4_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 3, 2, 1, 0); \
+  float32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
+  float32x4_t __ret_708; \
+  __ret_708 = __noswap_vmulxq_f32(__rev0_708, __noswap_splatq_laneq_f32(__rev1_708, __p2_708)); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \
+  __ret_708; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0_709, __p1_709, __p2_709) __extension__ ({ \
+  float32x2_t __s0_709 = __p0_709; \
+  float32x4_t __s1_709 = __p1_709; \
+  float32x2_t __ret_709; \
+  __ret_709 = vmulx_f32(__s0_709, splat_laneq_f32(__s1_709, __p2_709)); \
+  __ret_709; \
+})
+#else
+#define vmulx_laneq_f32(__p0_710, __p1_710, __p2_710) __extension__ ({ \
+  float32x2_t __s0_710 = __p0_710; \
+  float32x4_t __s1_710 = __p1_710; \
+  float32x2_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 1, 0); \
+  float32x4_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \
+  float32x2_t __ret_710; \
+  __ret_710 = __noswap_vmulx_f32(__rev0_710, __noswap_splat_laneq_f32(__rev1_710, __p2_710)); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 1, 0); \
+  __ret_710; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
   float64x2_t __ret;
   __ret = -__p0;
@@ -58537,98 +58725,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \
-  int64x2_t __s0_695 = __p0_695; \
-  int32x4_t __s1_695 = __p1_695; \
-  int32x2_t __s2_695 = __p2_695; \
-  int64x2_t __ret_695; \
-  __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \
-  __ret_695; \
+#define vqdmlal_high_lane_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
+  int64x2_t __s0_711 = __p0_711; \
+  int32x4_t __s1_711 = __p1_711; \
+  int32x2_t __s2_711 = __p2_711; \
+  int64x2_t __ret_711; \
+  __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_lane_s32(__s2_711, __p3_711)); \
+  __ret_711; \
 })
 #else
-#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \
-  int64x2_t __s0_696 = __p0_696; \
-  int32x4_t __s1_696 = __p1_696; \
-  int32x2_t __s2_696 = __p2_696; \
-  int64x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int32x4_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \
-  int32x2_t __rev2_696;  __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \
-  int64x2_t __ret_696; \
-  __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \
-  __ret_696; \
+#define vqdmlal_high_lane_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
+  int64x2_t __s0_712 = __p0_712; \
+  int32x4_t __s1_712 = __p1_712; \
+  int32x2_t __s2_712 = __p2_712; \
+  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
+  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
+  int32x2_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 1, 0); \
+  int64x2_t __ret_712; \
+  __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_lane_s32(__rev2_712, __p3_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
+  __ret_712; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \
-  int32x4_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int16x4_t __s2_697 = __p2_697; \
-  int32x4_t __ret_697; \
-  __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \
-  __ret_697; \
+#define vqdmlal_high_lane_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
+  int32x4_t __s0_713 = __p0_713; \
+  int16x8_t __s1_713 = __p1_713; \
+  int16x4_t __s2_713 = __p2_713; \
+  int32x4_t __ret_713; \
+  __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_lane_s16(__s2_713, __p3_713)); \
+  __ret_713; \
 })
 #else
-#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \
-  int32x4_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int16x4_t __s2_698 = __p2_698; \
-  int32x4_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_698;  __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \
-  int32x4_t __ret_698; \
-  __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \
-  __ret_698; \
+#define vqdmlal_high_lane_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
+  int32x4_t __s0_714 = __p0_714; \
+  int16x8_t __s1_714 = __p1_714; \
+  int16x4_t __s2_714 = __p2_714; \
+  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 3, 2, 1, 0); \
+  int32x4_t __ret_714; \
+  __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_lane_s16(__rev2_714, __p3_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
+  __ret_714; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \
-  int64x2_t __s0_699 = __p0_699; \
-  int32x4_t __s1_699 = __p1_699; \
-  int32x4_t __s2_699 = __p2_699; \
-  int64x2_t __ret_699; \
-  __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \
-  __ret_699; \
+#define vqdmlal_high_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
+  int64x2_t __s0_715 = __p0_715; \
+  int32x4_t __s1_715 = __p1_715; \
+  int32x4_t __s2_715 = __p2_715; \
+  int64x2_t __ret_715; \
+  __ret_715 = vqdmlal_s32(__s0_715, vget_high_s32(__s1_715), splat_laneq_s32(__s2_715, __p3_715)); \
+  __ret_715; \
 })
 #else
-#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \
-  int64x2_t __s0_700 = __p0_700; \
-  int32x4_t __s1_700 = __p1_700; \
-  int32x4_t __s2_700 = __p2_700; \
-  int64x2_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \
-  int32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  int32x4_t __rev2_700;  __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \
-  int64x2_t __ret_700; \
-  __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \
-  __ret_700; \
+#define vqdmlal_high_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
+  int64x2_t __s0_716 = __p0_716; \
+  int32x4_t __s1_716 = __p1_716; \
+  int32x4_t __s2_716 = __p2_716; \
+  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
+  int32x4_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 3, 2, 1, 0); \
+  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
+  int64x2_t __ret_716; \
+  __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __noswap_vget_high_s32(__rev1_716), __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
+  __ret_716; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \
-  int32x4_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  int16x8_t __s2_701 = __p2_701; \
-  int32x4_t __ret_701; \
-  __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \
-  __ret_701; \
+#define vqdmlal_high_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
+  int32x4_t __s0_717 = __p0_717; \
+  int16x8_t __s1_717 = __p1_717; \
+  int16x8_t __s2_717 = __p2_717; \
+  int32x4_t __ret_717; \
+  __ret_717 = vqdmlal_s16(__s0_717, vget_high_s16(__s1_717), splat_laneq_s16(__s2_717, __p3_717)); \
+  __ret_717; \
 })
 #else
-#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \
-  int32x4_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int16x8_t __s2_702 = __p2_702; \
-  int32x4_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \
-  int16x8_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_702;  __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_702; \
-  __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
+#define vqdmlal_high_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
+  int32x4_t __s0_718 = __p0_718; \
+  int16x8_t __s1_718 = __p1_718; \
+  int16x8_t __s2_718 = __p2_718; \
+  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
+  int16x8_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_718; \
+  __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __noswap_vget_high_s16(__rev1_718), __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
+  __ret_718; \
 })
 #endif
 
@@ -58751,50 +58939,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \
-  int64x2_t __s0_703 = __p0_703; \
-  int32x2_t __s1_703 = __p1_703; \
-  int32x4_t __s2_703 = __p2_703; \
-  int64x2_t __ret_703; \
-  __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \
-  __ret_703; \
+#define vqdmlal_laneq_s32(__p0_719, __p1_719, __p2_719, __p3_719) __extension__ ({ \
+  int64x2_t __s0_719 = __p0_719; \
+  int32x2_t __s1_719 = __p1_719; \
+  int32x4_t __s2_719 = __p2_719; \
+  int64x2_t __ret_719; \
+  __ret_719 = vqdmlal_s32(__s0_719, __s1_719, splat_laneq_s32(__s2_719, __p3_719)); \
+  __ret_719; \
 })
 #else
-#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \
-  int64x2_t __s0_704 = __p0_704; \
-  int32x2_t __s1_704 = __p1_704; \
-  int32x4_t __s2_704 = __p2_704; \
-  int64x2_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \
-  int32x2_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \
-  int32x4_t __rev2_704;  __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \
-  int64x2_t __ret_704; \
-  __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 1, 0); \
-  __ret_704; \
+#define vqdmlal_laneq_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \
+  int64x2_t __s0_720 = __p0_720; \
+  int32x2_t __s1_720 = __p1_720; \
+  int32x4_t __s2_720 = __p2_720; \
+  int64x2_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \
+  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
+  int32x4_t __rev2_720;  __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 3, 2, 1, 0); \
+  int64x2_t __ret_720; \
+  __ret_720 = __noswap_vqdmlal_s32(__rev0_720, __rev1_720, __noswap_splat_laneq_s32(__rev2_720, __p3_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \
-  int32x4_t __s0_705 = __p0_705; \
-  int16x4_t __s1_705 = __p1_705; \
-  int16x8_t __s2_705 = __p2_705; \
-  int32x4_t __ret_705; \
-  __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \
-  __ret_705; \
+#define vqdmlal_laneq_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \
+  int32x4_t __s0_721 = __p0_721; \
+  int16x4_t __s1_721 = __p1_721; \
+  int16x8_t __s2_721 = __p2_721; \
+  int32x4_t __ret_721; \
+  __ret_721 = vqdmlal_s16(__s0_721, __s1_721, splat_laneq_s16(__s2_721, __p3_721)); \
+  __ret_721; \
 })
 #else
-#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \
-  int32x4_t __s0_706 = __p0_706; \
-  int16x4_t __s1_706 = __p1_706; \
-  int16x8_t __s2_706 = __p2_706; \
-  int32x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int16x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __rev2_706;  __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_706; \
-  __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \
-  __ret_706; \
+#define vqdmlal_laneq_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \
+  int32x4_t __s0_722 = __p0_722; \
+  int16x4_t __s1_722 = __p1_722; \
+  int16x8_t __s2_722 = __p2_722; \
+  int32x4_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \
+  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
+  int16x8_t __rev2_722;  __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_722; \
+  __ret_722 = __noswap_vqdmlal_s16(__rev0_722, __rev1_722, __noswap_splat_laneq_s16(__rev2_722, __p3_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
 })
 #endif
 
@@ -58845,98 +59033,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \
-  int64x2_t __s0_707 = __p0_707; \
-  int32x4_t __s1_707 = __p1_707; \
-  int32x2_t __s2_707 = __p2_707; \
-  int64x2_t __ret_707; \
-  __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \
-  __ret_707; \
+#define vqdmlsl_high_lane_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \
+  int64x2_t __s0_723 = __p0_723; \
+  int32x4_t __s1_723 = __p1_723; \
+  int32x2_t __s2_723 = __p2_723; \
+  int64x2_t __ret_723; \
+  __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_lane_s32(__s2_723, __p3_723)); \
+  __ret_723; \
 })
 #else
-#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \
-  int64x2_t __s0_708 = __p0_708; \
-  int32x4_t __s1_708 = __p1_708; \
-  int32x2_t __s2_708 = __p2_708; \
-  int64x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int32x4_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \
-  int32x2_t __rev2_708;  __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \
-  int64x2_t __ret_708; \
-  __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \
-  __ret_708; \
+#define vqdmlsl_high_lane_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \
+  int64x2_t __s0_724 = __p0_724; \
+  int32x4_t __s1_724 = __p1_724; \
+  int32x2_t __s2_724 = __p2_724; \
+  int64x2_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \
+  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  int32x2_t __rev2_724;  __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 1, 0); \
+  int64x2_t __ret_724; \
+  __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_lane_s32(__rev2_724, __p3_724)); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \
+  __ret_724; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \
-  int32x4_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int16x4_t __s2_709 = __p2_709; \
-  int32x4_t __ret_709; \
-  __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \
-  __ret_709; \
+#define vqdmlsl_high_lane_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \
+  int32x4_t __s0_725 = __p0_725; \
+  int16x8_t __s1_725 = __p1_725; \
+  int16x4_t __s2_725 = __p2_725; \
+  int32x4_t __ret_725; \
+  __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_lane_s16(__s2_725, __p3_725)); \
+  __ret_725; \
 })
 #else
-#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \
-  int32x4_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int16x4_t __s2_710 = __p2_710; \
-  int32x4_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_710;  __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \
-  int32x4_t __ret_710; \
-  __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \
-  __ret_710; \
+#define vqdmlsl_high_lane_s16(__p0_726, __p1_726, __p2_726, __p3_726) __extension__ ({ \
+  int32x4_t __s0_726 = __p0_726; \
+  int16x8_t __s1_726 = __p1_726; \
+  int16x4_t __s2_726 = __p2_726; \
+  int32x4_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \
+  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_726;  __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 3, 2, 1, 0); \
+  int32x4_t __ret_726; \
+  __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_lane_s16(__rev2_726, __p3_726)); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \
-  int64x2_t __s0_711 = __p0_711; \
-  int32x4_t __s1_711 = __p1_711; \
-  int32x4_t __s2_711 = __p2_711; \
-  int64x2_t __ret_711; \
-  __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \
-  __ret_711; \
+#define vqdmlsl_high_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \
+  int64x2_t __s0_727 = __p0_727; \
+  int32x4_t __s1_727 = __p1_727; \
+  int32x4_t __s2_727 = __p2_727; \
+  int64x2_t __ret_727; \
+  __ret_727 = vqdmlsl_s32(__s0_727, vget_high_s32(__s1_727), splat_laneq_s32(__s2_727, __p3_727)); \
+  __ret_727; \
 })
 #else
-#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \
-  int64x2_t __s0_712 = __p0_712; \
-  int32x4_t __s1_712 = __p1_712; \
-  int32x4_t __s2_712 = __p2_712; \
-  int64x2_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \
-  int32x4_t __rev1_712;  __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \
-  int32x4_t __rev2_712;  __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \
-  int64x2_t __ret_712; \
-  __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \
-  __ret_712; \
+#define vqdmlsl_high_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \
+  int64x2_t __s0_728 = __p0_728; \
+  int32x4_t __s1_728 = __p1_728; \
+  int32x4_t __s2_728 = __p2_728; \
+  int64x2_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \
+  int32x4_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \
+  int32x4_t __rev2_728;  __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \
+  int64x2_t __ret_728; \
+  __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __noswap_vget_high_s32(__rev1_728), __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
+  __ret_728; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \
-  int32x4_t __s0_713 = __p0_713; \
-  int16x8_t __s1_713 = __p1_713; \
-  int16x8_t __s2_713 = __p2_713; \
-  int32x4_t __ret_713; \
-  __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \
-  __ret_713; \
+#define vqdmlsl_high_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \
+  int32x4_t __s0_729 = __p0_729; \
+  int16x8_t __s1_729 = __p1_729; \
+  int16x8_t __s2_729 = __p2_729; \
+  int32x4_t __ret_729; \
+  __ret_729 = vqdmlsl_s16(__s0_729, vget_high_s16(__s1_729), splat_laneq_s16(__s2_729, __p3_729)); \
+  __ret_729; \
 })
 #else
-#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \
-  int32x4_t __s0_714 = __p0_714; \
-  int16x8_t __s1_714 = __p1_714; \
-  int16x8_t __s2_714 = __p2_714; \
-  int32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  int16x8_t __rev1_714;  __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_714;  __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_714; \
-  __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \
-  __ret_714; \
+#define vqdmlsl_high_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \
+  int32x4_t __s0_730 = __p0_730; \
+  int16x8_t __s1_730 = __p1_730; \
+  int16x8_t __s2_730 = __p2_730; \
+  int32x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int16x8_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_730;  __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_730; \
+  __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __noswap_vget_high_s16(__rev1_730), __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
+  __ret_730; \
 })
 #endif
 
@@ -59059,50 +59247,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \
-  int64x2_t __s0_715 = __p0_715; \
-  int32x2_t __s1_715 = __p1_715; \
-  int32x4_t __s2_715 = __p2_715; \
-  int64x2_t __ret_715; \
-  __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \
-  __ret_715; \
+#define vqdmlsl_laneq_s32(__p0_731, __p1_731, __p2_731, __p3_731) __extension__ ({ \
+  int64x2_t __s0_731 = __p0_731; \
+  int32x2_t __s1_731 = __p1_731; \
+  int32x4_t __s2_731 = __p2_731; \
+  int64x2_t __ret_731; \
+  __ret_731 = vqdmlsl_s32(__s0_731, __s1_731, splat_laneq_s32(__s2_731, __p3_731)); \
+  __ret_731; \
 })
 #else
-#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \
-  int64x2_t __s0_716 = __p0_716; \
-  int32x2_t __s1_716 = __p1_716; \
-  int32x4_t __s2_716 = __p2_716; \
-  int64x2_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \
-  int32x2_t __rev1_716;  __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \
-  int32x4_t __rev2_716;  __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \
-  int64x2_t __ret_716; \
-  __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \
-  __ret_716; \
+#define vqdmlsl_laneq_s32(__p0_732, __p1_732, __p2_732, __p3_732) __extension__ ({ \
+  int64x2_t __s0_732 = __p0_732; \
+  int32x2_t __s1_732 = __p1_732; \
+  int32x4_t __s2_732 = __p2_732; \
+  int64x2_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \
+  int32x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  int32x4_t __rev2_732;  __rev2_732 = __builtin_shufflevector(__s2_732, __s2_732, 3, 2, 1, 0); \
+  int64x2_t __ret_732; \
+  __ret_732 = __noswap_vqdmlsl_s32(__rev0_732, __rev1_732, __noswap_splat_laneq_s32(__rev2_732, __p3_732)); \
+  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
+  __ret_732; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \
-  int32x4_t __s0_717 = __p0_717; \
-  int16x4_t __s1_717 = __p1_717; \
-  int16x8_t __s2_717 = __p2_717; \
-  int32x4_t __ret_717; \
-  __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \
-  __ret_717; \
+#define vqdmlsl_laneq_s16(__p0_733, __p1_733, __p2_733, __p3_733) __extension__ ({ \
+  int32x4_t __s0_733 = __p0_733; \
+  int16x4_t __s1_733 = __p1_733; \
+  int16x8_t __s2_733 = __p2_733; \
+  int32x4_t __ret_733; \
+  __ret_733 = vqdmlsl_s16(__s0_733, __s1_733, splat_laneq_s16(__s2_733, __p3_733)); \
+  __ret_733; \
 })
 #else
-#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \
-  int32x4_t __s0_718 = __p0_718; \
-  int16x4_t __s1_718 = __p1_718; \
-  int16x8_t __s2_718 = __p2_718; \
-  int32x4_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \
-  int16x4_t __rev1_718;  __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \
-  int16x8_t __rev2_718;  __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_718; \
-  __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \
-  __ret_718; \
+#define vqdmlsl_laneq_s16(__p0_734, __p1_734, __p2_734, __p3_734) __extension__ ({ \
+  int32x4_t __s0_734 = __p0_734; \
+  int16x4_t __s1_734 = __p1_734; \
+  int16x8_t __s2_734 = __p2_734; \
+  int32x4_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \
+  int16x4_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \
+  int16x8_t __rev2_734;  __rev2_734 = __builtin_shufflevector(__s2_734, __s2_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_734; \
+  __ret_734 = __noswap_vqdmlsl_s16(__rev0_734, __rev1_734, __noswap_splat_laneq_s16(__rev2_734, __p3_734)); \
+  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
+  __ret_734; \
 })
 #endif
 
@@ -59201,78 +59389,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \
-  int32_t __s0_719 = __p0_719; \
-  int32x2_t __s1_719 = __p1_719; \
-  int32_t __ret_719; \
-  __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \
-  __ret_719; \
+#define vqdmulhs_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
+  int32_t __s0_735 = __p0_735; \
+  int32x2_t __s1_735 = __p1_735; \
+  int32_t __ret_735; \
+  __ret_735 = vqdmulhs_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
+  __ret_735; \
 })
 #else
-#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \
-  int32_t __s0_720 = __p0_720; \
-  int32x2_t __s1_720 = __p1_720; \
-  int32x2_t __rev1_720;  __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \
-  int32_t __ret_720; \
-  __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \
-  __ret_720; \
+#define vqdmulhs_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
+  int32_t __s0_736 = __p0_736; \
+  int32x2_t __s1_736 = __p1_736; \
+  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
+  int32_t __ret_736; \
+  __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
+  __ret_736; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
-  int16_t __s0_721 = __p0_721; \
-  int16x4_t __s1_721 = __p1_721; \
-  int16_t __ret_721; \
-  __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \
-  __ret_721; \
+#define vqdmulhh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
+  int16_t __s0_737 = __p0_737; \
+  int16x4_t __s1_737 = __p1_737; \
+  int16_t __ret_737; \
+  __ret_737 = vqdmulhh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
+  __ret_737; \
 })
 #else
-#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
-  int16_t __s0_722 = __p0_722; \
-  int16x4_t __s1_722 = __p1_722; \
-  int16x4_t __rev1_722;  __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \
-  int16_t __ret_722; \
-  __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \
-  __ret_722; \
+#define vqdmulhh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
+  int16_t __s0_738 = __p0_738; \
+  int16x4_t __s1_738 = __p1_738; \
+  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
+  int16_t __ret_738; \
+  __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
+  __ret_738; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  int32_t __s0_723 = __p0_723; \
-  int32x4_t __s1_723 = __p1_723; \
-  int32_t __ret_723; \
-  __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \
-  __ret_723; \
+#define vqdmulhs_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
+  int32_t __s0_739 = __p0_739; \
+  int32x4_t __s1_739 = __p1_739; \
+  int32_t __ret_739; \
+  __ret_739 = vqdmulhs_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
+  __ret_739; \
 })
 #else
-#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  int32_t __s0_724 = __p0_724; \
-  int32x4_t __s1_724 = __p1_724; \
-  int32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  int32_t __ret_724; \
-  __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \
-  __ret_724; \
+#define vqdmulhs_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
+  int32_t __s0_740 = __p0_740; \
+  int32x4_t __s1_740 = __p1_740; \
+  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
+  int32_t __ret_740; \
+  __ret_740 = vqdmulhs_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
+  __ret_740; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  int16_t __s0_725 = __p0_725; \
-  int16x8_t __s1_725 = __p1_725; \
-  int16_t __ret_725; \
-  __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \
-  __ret_725; \
+#define vqdmulhh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
+  int16_t __s0_741 = __p0_741; \
+  int16x8_t __s1_741 = __p1_741; \
+  int16_t __ret_741; \
+  __ret_741 = vqdmulhh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
+  __ret_741; \
 })
 #else
-#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  int16_t __s0_726 = __p0_726; \
-  int16x8_t __s1_726 = __p1_726; \
-  int16x8_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_726; \
-  __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \
-  __ret_726; \
+#define vqdmulhh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
+  int16_t __s0_742 = __p0_742; \
+  int16x8_t __s1_742 = __p1_742; \
+  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_742; \
+  __ret_742 = vqdmulhh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
+  __ret_742; \
 })
 #endif
 
@@ -59405,86 +59593,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  int32x4_t __s0_727 = __p0_727; \
-  int32x2_t __s1_727 = __p1_727; \
-  int64x2_t __ret_727; \
-  __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \
-  __ret_727; \
+#define vqdmull_high_lane_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
+  int32x4_t __s0_743 = __p0_743; \
+  int32x2_t __s1_743 = __p1_743; \
+  int64x2_t __ret_743; \
+  __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_lane_s32(__s1_743, __p2_743)); \
+  __ret_743; \
 })
 #else
-#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  int32x4_t __s0_728 = __p0_728; \
-  int32x2_t __s1_728 = __p1_728; \
-  int32x4_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \
-  int32x2_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \
-  int64x2_t __ret_728; \
-  __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \
-  __ret_728; \
+#define vqdmull_high_lane_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
+  int32x4_t __s0_744 = __p0_744; \
+  int32x2_t __s1_744 = __p1_744; \
+  int32x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  int32x2_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 1, 0); \
+  int64x2_t __ret_744; \
+  __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_lane_s32(__rev1_744, __p2_744)); \
+  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
+  __ret_744; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x8_t __s0_729 = __p0_729; \
-  int16x4_t __s1_729 = __p1_729; \
-  int32x4_t __ret_729; \
-  __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \
-  __ret_729; \
+#define vqdmull_high_lane_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
+  int16x8_t __s0_745 = __p0_745; \
+  int16x4_t __s1_745 = __p1_745; \
+  int32x4_t __ret_745; \
+  __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_lane_s16(__s1_745, __p2_745)); \
+  __ret_745; \
 })
 #else
-#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x8_t __s0_730 = __p0_730; \
-  int16x4_t __s1_730 = __p1_730; \
-  int16x8_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int32x4_t __ret_730; \
-  __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \
-  __ret_730; \
+#define vqdmull_high_lane_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
+  int16x8_t __s0_746 = __p0_746; \
+  int16x4_t __s1_746 = __p1_746; \
+  int16x8_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 3, 2, 1, 0); \
+  int32x4_t __ret_746; \
+  __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_lane_s16(__rev1_746, __p2_746)); \
+  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
+  __ret_746; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x4_t __s0_731 = __p0_731; \
-  int32x4_t __s1_731 = __p1_731; \
-  int64x2_t __ret_731; \
-  __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \
-  __ret_731; \
+#define vqdmull_high_laneq_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
+  int32x4_t __s0_747 = __p0_747; \
+  int32x4_t __s1_747 = __p1_747; \
+  int64x2_t __ret_747; \
+  __ret_747 = vqdmull_s32(vget_high_s32(__s0_747), splat_laneq_s32(__s1_747, __p2_747)); \
+  __ret_747; \
 })
 #else
-#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x4_t __s0_732 = __p0_732; \
-  int32x4_t __s1_732 = __p1_732; \
-  int32x4_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \
-  int32x4_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \
-  int64x2_t __ret_732; \
-  __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \
-  __ret_732; \
+#define vqdmull_high_laneq_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
+  int32x4_t __s0_748 = __p0_748; \
+  int32x4_t __s1_748 = __p1_748; \
+  int32x4_t __rev0_748;  __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \
+  int32x4_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 3, 2, 1, 0); \
+  int64x2_t __ret_748; \
+  __ret_748 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_748), __noswap_splat_laneq_s32(__rev1_748, __p2_748)); \
+  __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 1, 0); \
+  __ret_748; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int16x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int32x4_t __ret_733; \
-  __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \
-  __ret_733; \
+#define vqdmull_high_laneq_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
+  int16x8_t __s0_749 = __p0_749; \
+  int16x8_t __s1_749 = __p1_749; \
+  int32x4_t __ret_749; \
+  __ret_749 = vqdmull_s16(vget_high_s16(__s0_749), splat_laneq_s16(__s1_749, __p2_749)); \
+  __ret_749; \
 })
 #else
-#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int16x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int16x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_734; \
-  __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \
-  __ret_734; \
+#define vqdmull_high_laneq_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
+  int16x8_t __s0_750 = __p0_750; \
+  int16x8_t __s1_750 = __p1_750; \
+  int16x8_t __rev0_750;  __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_750; \
+  __ret_750 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_750), __noswap_splat_laneq_s16(__rev1_750, __p2_750)); \
+  __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \
+  __ret_750; \
 })
 #endif
 
@@ -59521,120 +59709,120 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
-  int32_t __s0_735 = __p0_735; \
-  int32x2_t __s1_735 = __p1_735; \
-  int64_t __ret_735; \
-  __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \
-  __ret_735; \
+#define vqdmulls_lane_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
+  int32_t __s0_751 = __p0_751; \
+  int32x2_t __s1_751 = __p1_751; \
+  int64_t __ret_751; \
+  __ret_751 = vqdmulls_s32(__s0_751, vget_lane_s32(__s1_751, __p2_751)); \
+  __ret_751; \
 })
 #else
-#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \
-  int32_t __s0_736 = __p0_736; \
-  int32x2_t __s1_736 = __p1_736; \
-  int32x2_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \
-  int64_t __ret_736; \
-  __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \
-  __ret_736; \
+#define vqdmulls_lane_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
+  int32_t __s0_752 = __p0_752; \
+  int32x2_t __s1_752 = __p1_752; \
+  int32x2_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 1, 0); \
+  int64_t __ret_752; \
+  __ret_752 = vqdmulls_s32(__s0_752, __noswap_vget_lane_s32(__rev1_752, __p2_752)); \
+  __ret_752; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \
-  int16_t __s0_737 = __p0_737; \
-  int16x4_t __s1_737 = __p1_737; \
-  int32_t __ret_737; \
-  __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \
-  __ret_737; \
+#define vqdmullh_lane_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
+  int16_t __s0_753 = __p0_753; \
+  int16x4_t __s1_753 = __p1_753; \
+  int32_t __ret_753; \
+  __ret_753 = vqdmullh_s16(__s0_753, vget_lane_s16(__s1_753, __p2_753)); \
+  __ret_753; \
 })
 #else
-#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
-  int16_t __s0_738 = __p0_738; \
-  int16x4_t __s1_738 = __p1_738; \
-  int16x4_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \
-  int32_t __ret_738; \
-  __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \
-  __ret_738; \
+#define vqdmullh_lane_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
+  int16_t __s0_754 = __p0_754; \
+  int16x4_t __s1_754 = __p1_754; \
+  int16x4_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 3, 2, 1, 0); \
+  int32_t __ret_754; \
+  __ret_754 = vqdmullh_s16(__s0_754, __noswap_vget_lane_s16(__rev1_754, __p2_754)); \
+  __ret_754; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \
-  int32_t __s0_739 = __p0_739; \
-  int32x4_t __s1_739 = __p1_739; \
-  int64_t __ret_739; \
-  __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \
-  __ret_739; \
+#define vqdmulls_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
+  int32_t __s0_755 = __p0_755; \
+  int32x4_t __s1_755 = __p1_755; \
+  int64_t __ret_755; \
+  __ret_755 = vqdmulls_s32(__s0_755, vgetq_lane_s32(__s1_755, __p2_755)); \
+  __ret_755; \
 })
 #else
-#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
-  int32_t __s0_740 = __p0_740; \
-  int32x4_t __s1_740 = __p1_740; \
-  int32x4_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \
-  int64_t __ret_740; \
-  __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \
-  __ret_740; \
+#define vqdmulls_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
+  int32_t __s0_756 = __p0_756; \
+  int32x4_t __s1_756 = __p1_756; \
+  int32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  int64_t __ret_756; \
+  __ret_756 = vqdmulls_s32(__s0_756, __noswap_vgetq_lane_s32(__rev1_756, __p2_756)); \
+  __ret_756; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \
-  int16_t __s0_741 = __p0_741; \
-  int16x8_t __s1_741 = __p1_741; \
-  int32_t __ret_741; \
-  __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \
-  __ret_741; \
+#define vqdmullh_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \
+  int16_t __s0_757 = __p0_757; \
+  int16x8_t __s1_757 = __p1_757; \
+  int32_t __ret_757; \
+  __ret_757 = vqdmullh_s16(__s0_757, vgetq_lane_s16(__s1_757, __p2_757)); \
+  __ret_757; \
 })
 #else
-#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \
-  int16_t __s0_742 = __p0_742; \
-  int16x8_t __s1_742 = __p1_742; \
-  int16x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_742; \
-  __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \
-  __ret_742; \
+#define vqdmullh_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \
+  int16_t __s0_758 = __p0_758; \
+  int16x8_t __s1_758 = __p1_758; \
+  int16x8_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32_t __ret_758; \
+  __ret_758 = vqdmullh_s16(__s0_758, __noswap_vgetq_lane_s16(__rev1_758, __p2_758)); \
+  __ret_758; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \
-  int32x2_t __s0_743 = __p0_743; \
-  int32x4_t __s1_743 = __p1_743; \
-  int64x2_t __ret_743; \
-  __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \
-  __ret_743; \
+#define vqdmull_laneq_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \
+  int32x2_t __s0_759 = __p0_759; \
+  int32x4_t __s1_759 = __p1_759; \
+  int64x2_t __ret_759; \
+  __ret_759 = vqdmull_s32(__s0_759, splat_laneq_s32(__s1_759, __p2_759)); \
+  __ret_759; \
 })
 #else
-#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \
-  int32x2_t __s0_744 = __p0_744; \
-  int32x4_t __s1_744 = __p1_744; \
-  int32x2_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \
-  int32x4_t __rev1_744;  __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \
-  int64x2_t __ret_744; \
-  __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \
-  __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \
-  __ret_744; \
+#define vqdmull_laneq_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \
+  int32x2_t __s0_760 = __p0_760; \
+  int32x4_t __s1_760 = __p1_760; \
+  int32x2_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 1, 0); \
+  int32x4_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \
+  int64x2_t __ret_760; \
+  __ret_760 = __noswap_vqdmull_s32(__rev0_760, __noswap_splat_laneq_s32(__rev1_760, __p2_760)); \
+  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 1, 0); \
+  __ret_760; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
-  int16x4_t __s0_745 = __p0_745; \
-  int16x8_t __s1_745 = __p1_745; \
-  int32x4_t __ret_745; \
-  __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \
-  __ret_745; \
+#define vqdmull_laneq_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \
+  int16x4_t __s0_761 = __p0_761; \
+  int16x8_t __s1_761 = __p1_761; \
+  int32x4_t __ret_761; \
+  __ret_761 = vqdmull_s16(__s0_761, splat_laneq_s16(__s1_761, __p2_761)); \
+  __ret_761; \
 })
 #else
-#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \
-  int16x4_t __s0_746 = __p0_746; \
-  int16x8_t __s1_746 = __p1_746; \
-  int16x4_t __rev0_746;  __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \
-  int16x8_t __rev1_746;  __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_746; \
-  __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \
-  __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \
-  __ret_746; \
+#define vqdmull_laneq_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \
+  int16x4_t __s0_762 = __p0_762; \
+  int16x8_t __s1_762 = __p1_762; \
+  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
+  int16x8_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_762; \
+  __ret_762 = __noswap_vqdmull_s16(__rev0_762, __noswap_splat_laneq_s16(__rev1_762, __p2_762)); \
+  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 3, 2, 1, 0); \
+  __ret_762; \
 })
 #endif
 
@@ -59972,78 +60160,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
-  int32_t __s0_747 = __p0_747; \
-  int32x2_t __s1_747 = __p1_747; \
-  int32_t __ret_747; \
-  __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \
-  __ret_747; \
+#define vqrdmulhs_lane_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \
+  int32_t __s0_763 = __p0_763; \
+  int32x2_t __s1_763 = __p1_763; \
+  int32_t __ret_763; \
+  __ret_763 = vqrdmulhs_s32(__s0_763, vget_lane_s32(__s1_763, __p2_763)); \
+  __ret_763; \
 })
 #else
-#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \
-  int32_t __s0_748 = __p0_748; \
-  int32x2_t __s1_748 = __p1_748; \
-  int32x2_t __rev1_748;  __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \
-  int32_t __ret_748; \
-  __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \
-  __ret_748; \
+#define vqrdmulhs_lane_s32(__p0_764, __p1_764, __p2_764) __extension__ ({ \
+  int32_t __s0_764 = __p0_764; \
+  int32x2_t __s1_764 = __p1_764; \
+  int32x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
+  int32_t __ret_764; \
+  __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vget_lane_s32(__rev1_764, __p2_764)); \
+  __ret_764; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \
-  int16_t __s0_749 = __p0_749; \
-  int16x4_t __s1_749 = __p1_749; \
-  int16_t __ret_749; \
-  __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \
-  __ret_749; \
+#define vqrdmulhh_lane_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  int16_t __s0_765 = __p0_765; \
+  int16x4_t __s1_765 = __p1_765; \
+  int16_t __ret_765; \
+  __ret_765 = vqrdmulhh_s16(__s0_765, vget_lane_s16(__s1_765, __p2_765)); \
+  __ret_765; \
 })
 #else
-#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
-  int16_t __s0_750 = __p0_750; \
-  int16x4_t __s1_750 = __p1_750; \
-  int16x4_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \
-  int16_t __ret_750; \
-  __ret_750 = vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \
-  __ret_750; \
+#define vqrdmulhh_lane_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  int16_t __s0_766 = __p0_766; \
+  int16x4_t __s1_766 = __p1_766; \
+  int16x4_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \
+  int16_t __ret_766; \
+  __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vget_lane_s16(__rev1_766, __p2_766)); \
+  __ret_766; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \
-  int32_t __s0_751 = __p0_751; \
-  int32x4_t __s1_751 = __p1_751; \
-  int32_t __ret_751; \
-  __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \
-  __ret_751; \
+#define vqrdmulhs_laneq_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  int32_t __s0_767 = __p0_767; \
+  int32x4_t __s1_767 = __p1_767; \
+  int32_t __ret_767; \
+  __ret_767 = vqrdmulhs_s32(__s0_767, vgetq_lane_s32(__s1_767, __p2_767)); \
+  __ret_767; \
 })
 #else
-#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \
-  int32_t __s0_752 = __p0_752; \
-  int32x4_t __s1_752 = __p1_752; \
-  int32x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  int32_t __ret_752; \
-  __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \
-  __ret_752; \
+#define vqrdmulhs_laneq_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  int32_t __s0_768 = __p0_768; \
+  int32x4_t __s1_768 = __p1_768; \
+  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
+  int32_t __ret_768; \
+  __ret_768 = vqrdmulhs_s32(__s0_768, __noswap_vgetq_lane_s32(__rev1_768, __p2_768)); \
+  __ret_768; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \
-  int16_t __s0_753 = __p0_753; \
-  int16x8_t __s1_753 = __p1_753; \
-  int16_t __ret_753; \
-  __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \
-  __ret_753; \
+#define vqrdmulhh_laneq_s16(__p0_769, __p1_769, __p2_769) __extension__ ({ \
+  int16_t __s0_769 = __p0_769; \
+  int16x8_t __s1_769 = __p1_769; \
+  int16_t __ret_769; \
+  __ret_769 = vqrdmulhh_s16(__s0_769, vgetq_lane_s16(__s1_769, __p2_769)); \
+  __ret_769; \
 })
 #else
-#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \
-  int16_t __s0_754 = __p0_754; \
-  int16x8_t __s1_754 = __p1_754; \
-  int16x8_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_754; \
-  __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \
-  __ret_754; \
+#define vqrdmulhh_laneq_s16(__p0_770, __p1_770, __p2_770) __extension__ ({ \
+  int16_t __s0_770 = __p0_770; \
+  int16x8_t __s1_770 = __p1_770; \
+  int16x8_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_770; \
+  __ret_770 = vqrdmulhh_s16(__s0_770, __noswap_vgetq_lane_s16(__rev1_770, __p2_770)); \
+  __ret_770; \
 })
 #endif
 
@@ -60172,128 +60360,128 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \
-  uint16x4_t __s0_755 = __p0_755; \
-  uint32x4_t __s1_755 = __p1_755; \
-  uint16x8_t __ret_755; \
-  __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \
-  __ret_755; \
+#define vqrshrn_high_n_u32(__p0_771, __p1_771, __p2_771) __extension__ ({ \
+  uint16x4_t __s0_771 = __p0_771; \
+  uint32x4_t __s1_771 = __p1_771; \
+  uint16x8_t __ret_771; \
+  __ret_771 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_771), (uint16x4_t)(vqrshrn_n_u32(__s1_771, __p2_771)))); \
+  __ret_771; \
 })
 #else
-#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \
-  uint16x4_t __s0_756 = __p0_756; \
-  uint32x4_t __s1_756 = __p1_756; \
-  uint16x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  uint32x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  uint16x8_t __ret_756; \
-  __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_756; \
+#define vqrshrn_high_n_u32(__p0_772, __p1_772, __p2_772) __extension__ ({ \
+  uint16x4_t __s0_772 = __p0_772; \
+  uint32x4_t __s1_772 = __p1_772; \
+  uint16x4_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \
+  uint32x4_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \
+  uint16x8_t __ret_772; \
+  __ret_772 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_772), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_772, __p2_772)))); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_772; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \
-  uint32x2_t __s0_757 = __p0_757; \
-  uint64x2_t __s1_757 = __p1_757; \
-  uint32x4_t __ret_757; \
-  __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), (uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \
-  __ret_757; \
+#define vqrshrn_high_n_u64(__p0_773, __p1_773, __p2_773) __extension__ ({ \
+  uint32x2_t __s0_773 = __p0_773; \
+  uint64x2_t __s1_773 = __p1_773; \
+  uint32x4_t __ret_773; \
+  __ret_773 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_773), (uint32x2_t)(vqrshrn_n_u64(__s1_773, __p2_773)))); \
+  __ret_773; \
 })
 #else
-#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \
-  uint32x2_t __s0_758 = __p0_758; \
-  uint64x2_t __s1_758 = __p1_758; \
-  uint32x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint64x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x4_t __ret_758; \
-  __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \
-  __ret_758; \
+#define vqrshrn_high_n_u64(__p0_774, __p1_774, __p2_774) __extension__ ({ \
+  uint32x2_t __s0_774 = __p0_774; \
+  uint64x2_t __s1_774 = __p1_774; \
+  uint32x2_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 1, 0); \
+  uint64x2_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 1, 0); \
+  uint32x4_t __ret_774; \
+  __ret_774 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_774), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_774, __p2_774)))); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \
+  __ret_774; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \
-  uint8x8_t __s0_759 = __p0_759; \
-  uint16x8_t __s1_759 = __p1_759; \
-  uint8x16_t __ret_759; \
-  __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \
-  __ret_759; \
+#define vqrshrn_high_n_u16(__p0_775, __p1_775, __p2_775) __extension__ ({ \
+  uint8x8_t __s0_775 = __p0_775; \
+  uint16x8_t __s1_775 = __p1_775; \
+  uint8x16_t __ret_775; \
+  __ret_775 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_775), (uint8x8_t)(vqrshrn_n_u16(__s1_775, __p2_775)))); \
+  __ret_775; \
 })
 #else
-#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \
-  uint8x8_t __s0_760 = __p0_760; \
-  uint16x8_t __s1_760 = __p1_760; \
-  uint8x8_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_760; \
-  __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_760; \
+#define vqrshrn_high_n_u16(__p0_776, __p1_776, __p2_776) __extension__ ({ \
+  uint8x8_t __s0_776 = __p0_776; \
+  uint16x8_t __s1_776 = __p1_776; \
+  uint8x8_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_776; \
+  __ret_776 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_776), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_776, __p2_776)))); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_776; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \
-  int16x4_t __s0_761 = __p0_761; \
-  int32x4_t __s1_761 = __p1_761; \
-  int16x8_t __ret_761; \
-  __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \
-  __ret_761; \
+#define vqrshrn_high_n_s32(__p0_777, __p1_777, __p2_777) __extension__ ({ \
+  int16x4_t __s0_777 = __p0_777; \
+  int32x4_t __s1_777 = __p1_777; \
+  int16x8_t __ret_777; \
+  __ret_777 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_777), (int16x4_t)(vqrshrn_n_s32(__s1_777, __p2_777)))); \
+  __ret_777; \
 })
 #else
-#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \
-  int16x4_t __s0_762 = __p0_762; \
-  int32x4_t __s1_762 = __p1_762; \
-  int16x4_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \
-  int32x4_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \
-  int16x8_t __ret_762; \
-  __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_762; \
+#define vqrshrn_high_n_s32(__p0_778, __p1_778, __p2_778) __extension__ ({ \
+  int16x4_t __s0_778 = __p0_778; \
+  int32x4_t __s1_778 = __p1_778; \
+  int16x4_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \
+  int32x4_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 3, 2, 1, 0); \
+  int16x8_t __ret_778; \
+  __ret_778 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_778), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_778, __p2_778)))); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_778; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \
-  int32x2_t __s0_763 = __p0_763; \
-  int64x2_t __s1_763 = __p1_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \
-  __ret_763; \
+#define vqrshrn_high_n_s64(__p0_779, __p1_779, __p2_779) __extension__ ({ \
+  int32x2_t __s0_779 = __p0_779; \
+  int64x2_t __s1_779 = __p1_779; \
+  int32x4_t __ret_779; \
+  __ret_779 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_779), (int32x2_t)(vqrshrn_n_s64(__s1_779, __p2_779)))); \
+  __ret_779; \
 })
 #else
-#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \
-  int32x2_t __s0_764 = __p0_764; \
-  int64x2_t __s1_764 = __p1_764; \
-  int32x2_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \
-  int64x2_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
+#define vqrshrn_high_n_s64(__p0_780, __p1_780, __p2_780) __extension__ ({ \
+  int32x2_t __s0_780 = __p0_780; \
+  int64x2_t __s1_780 = __p1_780; \
+  int32x2_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \
+  int64x2_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 1, 0); \
+  int32x4_t __ret_780; \
+  __ret_780 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_780), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_780, __p2_780)))); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \
+  __ret_780; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  int8x8_t __s0_765 = __p0_765; \
-  int16x8_t __s1_765 = __p1_765; \
-  int8x16_t __ret_765; \
-  __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \
-  __ret_765; \
+#define vqrshrn_high_n_s16(__p0_781, __p1_781, __p2_781) __extension__ ({ \
+  int8x8_t __s0_781 = __p0_781; \
+  int16x8_t __s1_781 = __p1_781; \
+  int8x16_t __ret_781; \
+  __ret_781 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_781), (int8x8_t)(vqrshrn_n_s16(__s1_781, __p2_781)))); \
+  __ret_781; \
 })
 #else
-#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  int8x8_t __s0_766 = __p0_766; \
-  int16x8_t __s1_766 = __p1_766; \
-  int8x8_t __rev0_766;  __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_766; \
-  __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_766; \
+#define vqrshrn_high_n_s16(__p0_782, __p1_782, __p2_782) __extension__ ({ \
+  int8x8_t __s0_782 = __p0_782; \
+  int16x8_t __s1_782 = __p1_782; \
+  int8x8_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_782; \
+  __ret_782 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_782), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_782, __p2_782)))); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_782; \
 })
 #endif
 
@@ -60334,65 +60522,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  int16x4_t __s0_767 = __p0_767; \
-  int32x4_t __s1_767 = __p1_767; \
-  int16x8_t __ret_767; \
-  __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \
-  __ret_767; \
+#define vqrshrun_high_n_s32(__p0_783, __p1_783, __p2_783) __extension__ ({ \
+  int16x4_t __s0_783 = __p0_783; \
+  int32x4_t __s1_783 = __p1_783; \
+  int16x8_t __ret_783; \
+  __ret_783 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_783), (int16x4_t)(vqrshrun_n_s32(__s1_783, __p2_783)))); \
+  __ret_783; \
 })
 #else
-#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  int16x4_t __s0_768 = __p0_768; \
-  int32x4_t __s1_768 = __p1_768; \
-  int16x4_t __rev0_768;  __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \
-  int32x4_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \
-  int16x8_t __ret_768; \
-  __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
+#define vqrshrun_high_n_s32(__p0_784, __p1_784, __p2_784) __extension__ ({ \
+  int16x4_t __s0_784 = __p0_784; \
+  int32x4_t __s1_784 = __p1_784; \
+  int16x4_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \
+  int32x4_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \
+  int16x8_t __ret_784; \
+  __ret_784 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_784), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_784, __p2_784)))); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_784; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \
-  int32x2_t __s0_769 = __p0_769; \
-  int64x2_t __s1_769 = __p1_769; \
-  int32x4_t __ret_769; \
-  __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \
-  __ret_769; \
+#define vqrshrun_high_n_s64(__p0_785, __p1_785, __p2_785) __extension__ ({ \
+  int32x2_t __s0_785 = __p0_785; \
+  int64x2_t __s1_785 = __p1_785; \
+  int32x4_t __ret_785; \
+  __ret_785 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_785), (int32x2_t)(vqrshrun_n_s64(__s1_785, __p2_785)))); \
+  __ret_785; \
 })
 #else
-#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \
-  int32x2_t __s0_770 = __p0_770; \
-  int64x2_t __s1_770 = __p1_770; \
-  int32x2_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \
-  int64x2_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \
-  int32x4_t __ret_770; \
-  __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
+#define vqrshrun_high_n_s64(__p0_786, __p1_786, __p2_786) __extension__ ({ \
+  int32x2_t __s0_786 = __p0_786; \
+  int64x2_t __s1_786 = __p1_786; \
+  int32x2_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \
+  int64x2_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 1, 0); \
+  int32x4_t __ret_786; \
+  __ret_786 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_786), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_786, __p2_786)))); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \
+  __ret_786; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \
-  int8x8_t __s0_771 = __p0_771; \
-  int16x8_t __s1_771 = __p1_771; \
-  int8x16_t __ret_771; \
-  __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \
-  __ret_771; \
+#define vqrshrun_high_n_s16(__p0_787, __p1_787, __p2_787) __extension__ ({ \
+  int8x8_t __s0_787 = __p0_787; \
+  int16x8_t __s1_787 = __p1_787; \
+  int8x16_t __ret_787; \
+  __ret_787 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_787), (int8x8_t)(vqrshrun_n_s16(__s1_787, __p2_787)))); \
+  __ret_787; \
 })
 #else
-#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \
-  int8x8_t __s0_772 = __p0_772; \
-  int16x8_t __s1_772 = __p1_772; \
-  int8x8_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_772; \
-  __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_772; \
+#define vqrshrun_high_n_s16(__p0_788, __p1_788, __p2_788) __extension__ ({ \
+  int8x8_t __s0_788 = __p0_788; \
+  int16x8_t __s1_788 = __p1_788; \
+  int8x8_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_788; \
+  __ret_788 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_788), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_788, __p2_788)))); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_788; \
 })
 #endif
 
@@ -60527,128 +60715,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \
-  uint16x4_t __s0_773 = __p0_773; \
-  uint32x4_t __s1_773 = __p1_773; \
-  uint16x8_t __ret_773; \
-  __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \
-  __ret_773; \
+#define vqshrn_high_n_u32(__p0_789, __p1_789, __p2_789) __extension__ ({ \
+  uint16x4_t __s0_789 = __p0_789; \
+  uint32x4_t __s1_789 = __p1_789; \
+  uint16x8_t __ret_789; \
+  __ret_789 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_789), (uint16x4_t)(vqshrn_n_u32(__s1_789, __p2_789)))); \
+  __ret_789; \
 })
 #else
-#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \
-  uint16x4_t __s0_774 = __p0_774; \
-  uint32x4_t __s1_774 = __p1_774; \
-  uint16x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  uint32x4_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \
-  uint16x8_t __ret_774; \
-  __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_774; \
+#define vqshrn_high_n_u32(__p0_790, __p1_790, __p2_790) __extension__ ({ \
+  uint16x4_t __s0_790 = __p0_790; \
+  uint32x4_t __s1_790 = __p1_790; \
+  uint16x4_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \
+  uint32x4_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 3, 2, 1, 0); \
+  uint16x8_t __ret_790; \
+  __ret_790 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_790), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_790, __p2_790)))); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_790; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \
-  uint32x2_t __s0_775 = __p0_775; \
-  uint64x2_t __s1_775 = __p1_775; \
-  uint32x4_t __ret_775; \
-  __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \
-  __ret_775; \
+#define vqshrn_high_n_u64(__p0_791, __p1_791, __p2_791) __extension__ ({ \
+  uint32x2_t __s0_791 = __p0_791; \
+  uint64x2_t __s1_791 = __p1_791; \
+  uint32x4_t __ret_791; \
+  __ret_791 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_791), (uint32x2_t)(vqshrn_n_u64(__s1_791, __p2_791)))); \
+  __ret_791; \
 })
 #else
-#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \
-  uint32x2_t __s0_776 = __p0_776; \
-  uint64x2_t __s1_776 = __p1_776; \
-  uint32x2_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \
-  uint64x2_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \
-  uint32x4_t __ret_776; \
-  __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
+#define vqshrn_high_n_u64(__p0_792, __p1_792, __p2_792) __extension__ ({ \
+  uint32x2_t __s0_792 = __p0_792; \
+  uint64x2_t __s1_792 = __p1_792; \
+  uint32x2_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \
+  uint64x2_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 1, 0); \
+  uint32x4_t __ret_792; \
+  __ret_792 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_792), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_792, __p2_792)))); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 3, 2, 1, 0); \
+  __ret_792; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \
-  uint8x8_t __s0_777 = __p0_777; \
-  uint16x8_t __s1_777 = __p1_777; \
-  uint8x16_t __ret_777; \
-  __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \
-  __ret_777; \
+#define vqshrn_high_n_u16(__p0_793, __p1_793, __p2_793) __extension__ ({ \
+  uint8x8_t __s0_793 = __p0_793; \
+  uint16x8_t __s1_793 = __p1_793; \
+  uint8x16_t __ret_793; \
+  __ret_793 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_793), (uint8x8_t)(vqshrn_n_u16(__s1_793, __p2_793)))); \
+  __ret_793; \
 })
 #else
-#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \
-  uint8x8_t __s0_778 = __p0_778; \
-  uint16x8_t __s1_778 = __p1_778; \
-  uint8x8_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_778; \
-  __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_778; \
+#define vqshrn_high_n_u16(__p0_794, __p1_794, __p2_794) __extension__ ({ \
+  uint8x8_t __s0_794 = __p0_794; \
+  uint16x8_t __s1_794 = __p1_794; \
+  uint8x8_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_794; \
+  __ret_794 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_794), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_794, __p2_794)))); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_794; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \
-  int16x4_t __s0_779 = __p0_779; \
-  int32x4_t __s1_779 = __p1_779; \
-  int16x8_t __ret_779; \
-  __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \
-  __ret_779; \
+#define vqshrn_high_n_s32(__p0_795, __p1_795, __p2_795) __extension__ ({ \
+  int16x4_t __s0_795 = __p0_795; \
+  int32x4_t __s1_795 = __p1_795; \
+  int16x8_t __ret_795; \
+  __ret_795 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_795), (int16x4_t)(vqshrn_n_s32(__s1_795, __p2_795)))); \
+  __ret_795; \
 })
 #else
-#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \
-  int16x4_t __s0_780 = __p0_780; \
-  int32x4_t __s1_780 = __p1_780; \
-  int16x4_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \
-  int32x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  int16x8_t __ret_780; \
-  __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_780; \
+#define vqshrn_high_n_s32(__p0_796, __p1_796, __p2_796) __extension__ ({ \
+  int16x4_t __s0_796 = __p0_796; \
+  int32x4_t __s1_796 = __p1_796; \
+  int16x4_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \
+  int32x4_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \
+  int16x8_t __ret_796; \
+  __ret_796 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_796), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_796, __p2_796)))); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_796; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \
-  int32x2_t __s0_781 = __p0_781; \
-  int64x2_t __s1_781 = __p1_781; \
-  int32x4_t __ret_781; \
-  __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \
-  __ret_781; \
+#define vqshrn_high_n_s64(__p0_797, __p1_797, __p2_797) __extension__ ({ \
+  int32x2_t __s0_797 = __p0_797; \
+  int64x2_t __s1_797 = __p1_797; \
+  int32x4_t __ret_797; \
+  __ret_797 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_797), (int32x2_t)(vqshrn_n_s64(__s1_797, __p2_797)))); \
+  __ret_797; \
 })
 #else
-#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \
-  int32x2_t __s0_782 = __p0_782; \
-  int64x2_t __s1_782 = __p1_782; \
-  int32x2_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \
-  int64x2_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \
-  int32x4_t __ret_782; \
-  __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
+#define vqshrn_high_n_s64(__p0_798, __p1_798, __p2_798) __extension__ ({ \
+  int32x2_t __s0_798 = __p0_798; \
+  int64x2_t __s1_798 = __p1_798; \
+  int32x2_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \
+  int64x2_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \
+  int32x4_t __ret_798; \
+  __ret_798 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_798), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_798, __p2_798)))); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \
+  __ret_798; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \
-  int8x8_t __s0_783 = __p0_783; \
-  int16x8_t __s1_783 = __p1_783; \
-  int8x16_t __ret_783; \
-  __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \
-  __ret_783; \
+#define vqshrn_high_n_s16(__p0_799, __p1_799, __p2_799) __extension__ ({ \
+  int8x8_t __s0_799 = __p0_799; \
+  int16x8_t __s1_799 = __p1_799; \
+  int8x16_t __ret_799; \
+  __ret_799 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_799), (int8x8_t)(vqshrn_n_s16(__s1_799, __p2_799)))); \
+  __ret_799; \
 })
 #else
-#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \
-  int8x8_t __s0_784 = __p0_784; \
-  int16x8_t __s1_784 = __p1_784; \
-  int8x8_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_784; \
-  __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_784; \
+#define vqshrn_high_n_s16(__p0_800, __p1_800, __p2_800) __extension__ ({ \
+  int8x8_t __s0_800 = __p0_800; \
+  int16x8_t __s1_800 = __p1_800; \
+  int8x8_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_800; \
+  __ret_800 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_800), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_800, __p2_800)))); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_800; \
 })
 #endif
 
@@ -60689,65 +60877,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \
-  int16x4_t __s0_785 = __p0_785; \
-  int32x4_t __s1_785 = __p1_785; \
-  int16x8_t __ret_785; \
-  __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \
-  __ret_785; \
+#define vqshrun_high_n_s32(__p0_801, __p1_801, __p2_801) __extension__ ({ \
+  int16x4_t __s0_801 = __p0_801; \
+  int32x4_t __s1_801 = __p1_801; \
+  int16x8_t __ret_801; \
+  __ret_801 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_801), (int16x4_t)(vqshrun_n_s32(__s1_801, __p2_801)))); \
+  __ret_801; \
 })
 #else
-#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \
-  int16x4_t __s0_786 = __p0_786; \
-  int32x4_t __s1_786 = __p1_786; \
-  int16x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  int32x4_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \
-  int16x8_t __ret_786; \
-  __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_786; \
+#define vqshrun_high_n_s32(__p0_802, __p1_802, __p2_802) __extension__ ({ \
+  int16x4_t __s0_802 = __p0_802; \
+  int32x4_t __s1_802 = __p1_802; \
+  int16x4_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \
+  int32x4_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \
+  int16x8_t __ret_802; \
+  __ret_802 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_802), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_802, __p2_802)))); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_802; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \
-  int32x2_t __s0_787 = __p0_787; \
-  int64x2_t __s1_787 = __p1_787; \
-  int32x4_t __ret_787; \
-  __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \
-  __ret_787; \
+#define vqshrun_high_n_s64(__p0_803, __p1_803, __p2_803) __extension__ ({ \
+  int32x2_t __s0_803 = __p0_803; \
+  int64x2_t __s1_803 = __p1_803; \
+  int32x4_t __ret_803; \
+  __ret_803 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_803), (int32x2_t)(vqshrun_n_s64(__s1_803, __p2_803)))); \
+  __ret_803; \
 })
 #else
-#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \
-  int32x2_t __s0_788 = __p0_788; \
-  int64x2_t __s1_788 = __p1_788; \
-  int32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  int64x2_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \
-  int32x4_t __ret_788; \
-  __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \
-  __ret_788; \
+#define vqshrun_high_n_s64(__p0_804, __p1_804, __p2_804) __extension__ ({ \
+  int32x2_t __s0_804 = __p0_804; \
+  int64x2_t __s1_804 = __p1_804; \
+  int32x2_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \
+  int64x2_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 1, 0); \
+  int32x4_t __ret_804; \
+  __ret_804 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_804), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_804, __p2_804)))); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 3, 2, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \
-  int8x8_t __s0_789 = __p0_789; \
-  int16x8_t __s1_789 = __p1_789; \
-  int8x16_t __ret_789; \
-  __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \
-  __ret_789; \
+#define vqshrun_high_n_s16(__p0_805, __p1_805, __p2_805) __extension__ ({ \
+  int8x8_t __s0_805 = __p0_805; \
+  int16x8_t __s1_805 = __p1_805; \
+  int8x16_t __ret_805; \
+  __ret_805 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_805), (int8x8_t)(vqshrun_n_s16(__s1_805, __p2_805)))); \
+  __ret_805; \
 })
 #else
-#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \
-  int8x8_t __s0_790 = __p0_790; \
-  int16x8_t __s1_790 = __p1_790; \
-  int8x8_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_790; \
-  __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_790; \
+#define vqshrun_high_n_s16(__p0_806, __p1_806, __p2_806) __extension__ ({ \
+  int8x8_t __s0_806 = __p0_806; \
+  int16x8_t __s1_806 = __p1_806; \
+  int8x8_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_806; \
+  __ret_806 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_806), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_806, __p2_806)))); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
@@ -62057,128 +62245,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \
-  uint16x4_t __s0_791 = __p0_791; \
-  uint32x4_t __s1_791 = __p1_791; \
-  uint16x8_t __ret_791; \
-  __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \
-  __ret_791; \
+#define vrshrn_high_n_u32(__p0_807, __p1_807, __p2_807) __extension__ ({ \
+  uint16x4_t __s0_807 = __p0_807; \
+  uint32x4_t __s1_807 = __p1_807; \
+  uint16x8_t __ret_807; \
+  __ret_807 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_807), (uint16x4_t)(vrshrn_n_u32(__s1_807, __p2_807)))); \
+  __ret_807; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \
-  uint16x4_t __s0_792 = __p0_792; \
-  uint32x4_t __s1_792 = __p1_792; \
-  uint16x4_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \
-  uint32x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  uint16x8_t __ret_792; \
-  __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_792; \
+#define vrshrn_high_n_u32(__p0_808, __p1_808, __p2_808) __extension__ ({ \
+  uint16x4_t __s0_808 = __p0_808; \
+  uint32x4_t __s1_808 = __p1_808; \
+  uint16x4_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 3, 2, 1, 0); \
+  uint32x4_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \
+  uint16x8_t __ret_808; \
+  __ret_808 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_808), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_808, __p2_808)))); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \
-  uint32x2_t __s0_793 = __p0_793; \
-  uint64x2_t __s1_793 = __p1_793; \
-  uint32x4_t __ret_793; \
-  __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \
-  __ret_793; \
+#define vrshrn_high_n_u64(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  uint32x2_t __s0_809 = __p0_809; \
+  uint64x2_t __s1_809 = __p1_809; \
+  uint32x4_t __ret_809; \
+  __ret_809 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_809), (uint32x2_t)(vrshrn_n_u64(__s1_809, __p2_809)))); \
+  __ret_809; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \
-  uint32x2_t __s0_794 = __p0_794; \
-  uint64x2_t __s1_794 = __p1_794; \
-  uint32x2_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \
-  uint64x2_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \
-  uint32x4_t __ret_794; \
-  __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
+#define vrshrn_high_n_u64(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  uint32x2_t __s0_810 = __p0_810; \
+  uint64x2_t __s1_810 = __p1_810; \
+  uint32x2_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 1, 0); \
+  uint64x2_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 1, 0); \
+  uint32x4_t __ret_810; \
+  __ret_810 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_810), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_810, __p2_810)))); \
+  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 3, 2, 1, 0); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \
-  uint8x8_t __s0_795 = __p0_795; \
-  uint16x8_t __s1_795 = __p1_795; \
-  uint8x16_t __ret_795; \
-  __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \
-  __ret_795; \
+#define vrshrn_high_n_u16(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  uint8x8_t __s0_811 = __p0_811; \
+  uint16x8_t __s1_811 = __p1_811; \
+  uint8x16_t __ret_811; \
+  __ret_811 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_811), (uint8x8_t)(vrshrn_n_u16(__s1_811, __p2_811)))); \
+  __ret_811; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \
-  uint8x8_t __s0_796 = __p0_796; \
-  uint16x8_t __s1_796 = __p1_796; \
-  uint8x8_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_796; \
-  __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_796; \
+#define vrshrn_high_n_u16(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  uint8x8_t __s0_812 = __p0_812; \
+  uint16x8_t __s1_812 = __p1_812; \
+  uint8x8_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_812; \
+  __ret_812 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_812), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_812, __p2_812)))); \
+  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_812; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \
-  int16x4_t __s0_797 = __p0_797; \
-  int32x4_t __s1_797 = __p1_797; \
-  int16x8_t __ret_797; \
-  __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \
-  __ret_797; \
+#define vrshrn_high_n_s32(__p0_813, __p1_813, __p2_813) __extension__ ({ \
+  int16x4_t __s0_813 = __p0_813; \
+  int32x4_t __s1_813 = __p1_813; \
+  int16x8_t __ret_813; \
+  __ret_813 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_813), (int16x4_t)(vrshrn_n_s32(__s1_813, __p2_813)))); \
+  __ret_813; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \
-  int16x4_t __s0_798 = __p0_798; \
-  int32x4_t __s1_798 = __p1_798; \
-  int16x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  int32x4_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \
-  int16x8_t __ret_798; \
-  __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_798; \
+#define vrshrn_high_n_s32(__p0_814, __p1_814, __p2_814) __extension__ ({ \
+  int16x4_t __s0_814 = __p0_814; \
+  int32x4_t __s1_814 = __p1_814; \
+  int16x4_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \
+  int32x4_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 3, 2, 1, 0); \
+  int16x8_t __ret_814; \
+  __ret_814 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_814), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_814, __p2_814)))); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_814; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \
-  int32x2_t __s0_799 = __p0_799; \
-  int64x2_t __s1_799 = __p1_799; \
-  int32x4_t __ret_799; \
-  __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \
-  __ret_799; \
+#define vrshrn_high_n_s64(__p0_815, __p1_815, __p2_815) __extension__ ({ \
+  int32x2_t __s0_815 = __p0_815; \
+  int64x2_t __s1_815 = __p1_815; \
+  int32x4_t __ret_815; \
+  __ret_815 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_815), (int32x2_t)(vrshrn_n_s64(__s1_815, __p2_815)))); \
+  __ret_815; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \
-  int32x2_t __s0_800 = __p0_800; \
-  int64x2_t __s1_800 = __p1_800; \
-  int32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  int64x2_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \
-  int32x4_t __ret_800; \
-  __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \
-  __ret_800; \
+#define vrshrn_high_n_s64(__p0_816, __p1_816, __p2_816) __extension__ ({ \
+  int32x2_t __s0_816 = __p0_816; \
+  int64x2_t __s1_816 = __p1_816; \
+  int32x2_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \
+  int64x2_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \
+  int32x4_t __ret_816; \
+  __ret_816 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_816), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_816, __p2_816)))); \
+  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 3, 2, 1, 0); \
+  __ret_816; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \
-  int8x8_t __s0_801 = __p0_801; \
-  int16x8_t __s1_801 = __p1_801; \
-  int8x16_t __ret_801; \
-  __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \
-  __ret_801; \
+#define vrshrn_high_n_s16(__p0_817, __p1_817, __p2_817) __extension__ ({ \
+  int8x8_t __s0_817 = __p0_817; \
+  int16x8_t __s1_817 = __p1_817; \
+  int8x16_t __ret_817; \
+  __ret_817 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_817), (int8x8_t)(vrshrn_n_s16(__s1_817, __p2_817)))); \
+  __ret_817; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \
-  int8x8_t __s0_802 = __p0_802; \
-  int16x8_t __s1_802 = __p1_802; \
-  int8x8_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_802; \
-  __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_802; \
+#define vrshrn_high_n_s16(__p0_818, __p1_818, __p2_818) __extension__ ({ \
+  int8x8_t __s0_818 = __p0_818; \
+  int16x8_t __s1_818 = __p1_818; \
+  int8x8_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_818; \
+  __ret_818 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_818), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_818, __p2_818)))); \
+  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_818; \
 })
 #endif
 
@@ -62458,110 +62646,110 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \
-  uint8x16_t __s0_803 = __p0_803; \
-  uint16x8_t __ret_803; \
-  __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \
-  __ret_803; \
+#define vshll_high_n_u8(__p0_819, __p1_819) __extension__ ({ \
+  uint8x16_t __s0_819 = __p0_819; \
+  uint16x8_t __ret_819; \
+  __ret_819 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_819), __p1_819)); \
+  __ret_819; \
 })
 #else
-#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \
-  uint8x16_t __s0_804 = __p0_804; \
-  uint8x16_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_804; \
-  __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_804; \
+#define vshll_high_n_u8(__p0_820, __p1_820) __extension__ ({ \
+  uint8x16_t __s0_820 = __p0_820; \
+  uint8x16_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_820; \
+  __ret_820 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_820), __p1_820)); \
+  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \
-  uint32x4_t __s0_805 = __p0_805; \
-  uint64x2_t __ret_805; \
-  __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \
-  __ret_805; \
+#define vshll_high_n_u32(__p0_821, __p1_821) __extension__ ({ \
+  uint32x4_t __s0_821 = __p0_821; \
+  uint64x2_t __ret_821; \
+  __ret_821 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_821), __p1_821)); \
+  __ret_821; \
 })
 #else
-#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \
-  uint32x4_t __s0_806 = __p0_806; \
-  uint32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  uint64x2_t __ret_806; \
-  __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \
-  __ret_806; \
+#define vshll_high_n_u32(__p0_822, __p1_822) __extension__ ({ \
+  uint32x4_t __s0_822 = __p0_822; \
+  uint32x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
+  uint64x2_t __ret_822; \
+  __ret_822 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_822), __p1_822)); \
+  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 1, 0); \
+  __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \
-  uint16x8_t __s0_807 = __p0_807; \
-  uint32x4_t __ret_807; \
-  __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \
-  __ret_807; \
+#define vshll_high_n_u16(__p0_823, __p1_823) __extension__ ({ \
+  uint16x8_t __s0_823 = __p0_823; \
+  uint32x4_t __ret_823; \
+  __ret_823 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_823), __p1_823)); \
+  __ret_823; \
 })
 #else
-#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \
-  uint16x8_t __s0_808 = __p0_808; \
-  uint16x8_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_808; \
-  __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \
-  __ret_808; \
+#define vshll_high_n_u16(__p0_824, __p1_824) __extension__ ({ \
+  uint16x8_t __s0_824 = __p0_824; \
+  uint16x8_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_824; \
+  __ret_824 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_824), __p1_824)); \
+  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
+  __ret_824; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \
-  int8x16_t __s0_809 = __p0_809; \
-  int16x8_t __ret_809; \
-  __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \
-  __ret_809; \
+#define vshll_high_n_s8(__p0_825, __p1_825) __extension__ ({ \
+  int8x16_t __s0_825 = __p0_825; \
+  int16x8_t __ret_825; \
+  __ret_825 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_825), __p1_825)); \
+  __ret_825; \
 })
 #else
-#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \
-  int8x16_t __s0_810 = __p0_810; \
-  int8x16_t __rev0_810;  __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_810; \
-  __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \
-  __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_810; \
+#define vshll_high_n_s8(__p0_826, __p1_826) __extension__ ({ \
+  int8x16_t __s0_826 = __p0_826; \
+  int8x16_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_826; \
+  __ret_826 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_826), __p1_826)); \
+  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_826; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_811, __p1_811) __extension__ ({ \
-  int32x4_t __s0_811 = __p0_811; \
-  int64x2_t __ret_811; \
-  __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \
-  __ret_811; \
+#define vshll_high_n_s32(__p0_827, __p1_827) __extension__ ({ \
+  int32x4_t __s0_827 = __p0_827; \
+  int64x2_t __ret_827; \
+  __ret_827 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_827), __p1_827)); \
+  __ret_827; \
 })
 #else
-#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \
-  int32x4_t __s0_812 = __p0_812; \
-  int32x4_t __rev0_812;  __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \
-  int64x2_t __ret_812; \
-  __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \
-  __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \
-  __ret_812; \
+#define vshll_high_n_s32(__p0_828, __p1_828) __extension__ ({ \
+  int32x4_t __s0_828 = __p0_828; \
+  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
+  int64x2_t __ret_828; \
+  __ret_828 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_828), __p1_828)); \
+  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \
+  __ret_828; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \
-  int16x8_t __s0_813 = __p0_813; \
-  int32x4_t __ret_813; \
-  __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \
-  __ret_813; \
+#define vshll_high_n_s16(__p0_829, __p1_829) __extension__ ({ \
+  int16x8_t __s0_829 = __p0_829; \
+  int32x4_t __ret_829; \
+  __ret_829 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_829), __p1_829)); \
+  __ret_829; \
 })
 #else
-#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \
-  int16x8_t __s0_814 = __p0_814; \
-  int16x8_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
+#define vshll_high_n_s16(__p0_830, __p1_830) __extension__ ({ \
+  int16x8_t __s0_830 = __p0_830; \
+  int16x8_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_830; \
+  __ret_830 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_830), __p1_830)); \
+  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \
+  __ret_830; \
 })
 #endif
 
@@ -62578,128 +62766,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \
-  uint16x4_t __s0_815 = __p0_815; \
-  uint32x4_t __s1_815 = __p1_815; \
-  uint16x8_t __ret_815; \
-  __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \
-  __ret_815; \
+#define vshrn_high_n_u32(__p0_831, __p1_831, __p2_831) __extension__ ({ \
+  uint16x4_t __s0_831 = __p0_831; \
+  uint32x4_t __s1_831 = __p1_831; \
+  uint16x8_t __ret_831; \
+  __ret_831 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_831), (uint16x4_t)(vshrn_n_u32(__s1_831, __p2_831)))); \
+  __ret_831; \
 })
 #else
-#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \
-  uint16x4_t __s0_816 = __p0_816; \
-  uint32x4_t __s1_816 = __p1_816; \
-  uint16x4_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \
-  uint32x4_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \
-  uint16x8_t __ret_816; \
-  __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \
-  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_816; \
+#define vshrn_high_n_u32(__p0_832, __p1_832, __p2_832) __extension__ ({ \
+  uint16x4_t __s0_832 = __p0_832; \
+  uint32x4_t __s1_832 = __p1_832; \
+  uint16x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
+  uint32x4_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 3, 2, 1, 0); \
+  uint16x8_t __ret_832; \
+  __ret_832 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_832), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_832, __p2_832)))); \
+  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_832; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \
-  uint32x2_t __s0_817 = __p0_817; \
-  uint64x2_t __s1_817 = __p1_817; \
-  uint32x4_t __ret_817; \
-  __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \
-  __ret_817; \
+#define vshrn_high_n_u64(__p0_833, __p1_833, __p2_833) __extension__ ({ \
+  uint32x2_t __s0_833 = __p0_833; \
+  uint64x2_t __s1_833 = __p1_833; \
+  uint32x4_t __ret_833; \
+  __ret_833 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_833), (uint32x2_t)(vshrn_n_u64(__s1_833, __p2_833)))); \
+  __ret_833; \
 })
 #else
-#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \
-  uint32x2_t __s0_818 = __p0_818; \
-  uint64x2_t __s1_818 = __p1_818; \
-  uint32x2_t __rev0_818;  __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \
-  uint64x2_t __rev1_818;  __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \
-  uint32x4_t __ret_818; \
-  __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \
-  __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \
-  __ret_818; \
+#define vshrn_high_n_u64(__p0_834, __p1_834, __p2_834) __extension__ ({ \
+  uint32x2_t __s0_834 = __p0_834; \
+  uint64x2_t __s1_834 = __p1_834; \
+  uint32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
+  uint64x2_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 1, 0); \
+  uint32x4_t __ret_834; \
+  __ret_834 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_834), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_834, __p2_834)))); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 3, 2, 1, 0); \
+  __ret_834; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \
-  uint8x8_t __s0_819 = __p0_819; \
-  uint16x8_t __s1_819 = __p1_819; \
-  uint8x16_t __ret_819; \
-  __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \
-  __ret_819; \
+#define vshrn_high_n_u16(__p0_835, __p1_835, __p2_835) __extension__ ({ \
+  uint8x8_t __s0_835 = __p0_835; \
+  uint16x8_t __s1_835 = __p1_835; \
+  uint8x16_t __ret_835; \
+  __ret_835 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_835), (uint8x8_t)(vshrn_n_u16(__s1_835, __p2_835)))); \
+  __ret_835; \
 })
 #else
-#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \
-  uint8x8_t __s0_820 = __p0_820; \
-  uint16x8_t __s1_820 = __p1_820; \
-  uint8x8_t __rev0_820;  __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_820;  __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_820; \
-  __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \
-  __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_820; \
+#define vshrn_high_n_u16(__p0_836, __p1_836, __p2_836) __extension__ ({ \
+  uint8x8_t __s0_836 = __p0_836; \
+  uint16x8_t __s1_836 = __p1_836; \
+  uint8x8_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_836;  __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_836; \
+  __ret_836 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_836), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_836, __p2_836)))); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_836; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \
-  int16x4_t __s0_821 = __p0_821; \
-  int32x4_t __s1_821 = __p1_821; \
-  int16x8_t __ret_821; \
-  __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \
-  __ret_821; \
+#define vshrn_high_n_s32(__p0_837, __p1_837, __p2_837) __extension__ ({ \
+  int16x4_t __s0_837 = __p0_837; \
+  int32x4_t __s1_837 = __p1_837; \
+  int16x8_t __ret_837; \
+  __ret_837 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_837), (int16x4_t)(vshrn_n_s32(__s1_837, __p2_837)))); \
+  __ret_837; \
 })
 #else
-#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \
-  int16x4_t __s0_822 = __p0_822; \
-  int32x4_t __s1_822 = __p1_822; \
-  int16x4_t __rev0_822;  __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \
-  int32x4_t __rev1_822;  __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \
-  int16x8_t __ret_822; \
-  __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \
-  __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_822; \
+#define vshrn_high_n_s32(__p0_838, __p1_838, __p2_838) __extension__ ({ \
+  int16x4_t __s0_838 = __p0_838; \
+  int32x4_t __s1_838 = __p1_838; \
+  int16x4_t __rev0_838;  __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 3, 2, 1, 0); \
+  int32x4_t __rev1_838;  __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 3, 2, 1, 0); \
+  int16x8_t __ret_838; \
+  __ret_838 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_838), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_838, __p2_838)))); \
+  __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_838; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \
-  int32x2_t __s0_823 = __p0_823; \
-  int64x2_t __s1_823 = __p1_823; \
-  int32x4_t __ret_823; \
-  __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \
-  __ret_823; \
+#define vshrn_high_n_s64(__p0_839, __p1_839, __p2_839) __extension__ ({ \
+  int32x2_t __s0_839 = __p0_839; \
+  int64x2_t __s1_839 = __p1_839; \
+  int32x4_t __ret_839; \
+  __ret_839 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_839), (int32x2_t)(vshrn_n_s64(__s1_839, __p2_839)))); \
+  __ret_839; \
 })
 #else
-#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \
-  int32x2_t __s0_824 = __p0_824; \
-  int64x2_t __s1_824 = __p1_824; \
-  int32x2_t __rev0_824;  __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \
-  int64x2_t __rev1_824;  __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \
-  int32x4_t __ret_824; \
-  __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \
-  __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \
-  __ret_824; \
+#define vshrn_high_n_s64(__p0_840, __p1_840, __p2_840) __extension__ ({ \
+  int32x2_t __s0_840 = __p0_840; \
+  int64x2_t __s1_840 = __p1_840; \
+  int32x2_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \
+  int64x2_t __rev1_840;  __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 1, 0); \
+  int32x4_t __ret_840; \
+  __ret_840 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_840), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_840, __p2_840)))); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \
-  int8x8_t __s0_825 = __p0_825; \
-  int16x8_t __s1_825 = __p1_825; \
-  int8x16_t __ret_825; \
-  __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \
-  __ret_825; \
+#define vshrn_high_n_s16(__p0_841, __p1_841, __p2_841) __extension__ ({ \
+  int8x8_t __s0_841 = __p0_841; \
+  int16x8_t __s1_841 = __p1_841; \
+  int8x16_t __ret_841; \
+  __ret_841 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_841), (int8x8_t)(vshrn_n_s16(__s1_841, __p2_841)))); \
+  __ret_841; \
 })
 #else
-#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \
-  int8x8_t __s0_826 = __p0_826; \
-  int16x8_t __s1_826 = __p1_826; \
-  int8x8_t __rev0_826;  __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_826;  __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_826; \
-  __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \
-  __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_826; \
+#define vshrn_high_n_s16(__p0_842, __p1_842, __p2_842) __extension__ ({ \
+  int8x8_t __s0_842 = __p0_842; \
+  int16x8_t __s1_842 = __p1_842; \
+  int8x8_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_842; \
+  __ret_842 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_842), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_842, __p2_842)))); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_842; \
 })
 #endif
 
@@ -64135,54 +64323,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int32x4_t __s0_827 = __p0_827; \
-  int8x16_t __s1_827 = __p1_827; \
-  uint8x16_t __s2_827 = __p2_827; \
-  int32x4_t __ret_827; \
-uint8x16_t __reint_827 = __s2_827; \
-  __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \
-  __ret_827; \
+#define vsudotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  int32x4_t __s0_843 = __p0_843; \
+  int8x16_t __s1_843 = __p1_843; \
+  uint8x16_t __s2_843 = __p2_843; \
+  int32x4_t __ret_843; \
+uint8x16_t __reint_843 = __s2_843; \
+  __ret_843 = vusdotq_s32(__s0_843, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843)), __s1_843); \
+  __ret_843; \
 })
 #else
-#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int32x4_t __s0_828 = __p0_828; \
-  int8x16_t __s1_828 = __p1_828; \
-  uint8x16_t __s2_828 = __p2_828; \
-  int32x4_t __rev0_828;  __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \
-  int8x16_t __rev1_828;  __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_828; \
-uint8x16_t __reint_828 = __rev2_828; \
-  __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \
-  __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \
-  __ret_828; \
+#define vsudotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  int32x4_t __s0_844 = __p0_844; \
+  int8x16_t __s1_844 = __p1_844; \
+  uint8x16_t __s2_844 = __p2_844; \
+  int32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
+  int8x16_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_844; \
+uint8x16_t __reint_844 = __rev2_844; \
+  __ret_844 = __noswap_vusdotq_s32(__rev0_844, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844)), __rev1_844); \
+  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32x2_t __s0_829 = __p0_829; \
-  int8x8_t __s1_829 = __p1_829; \
-  uint8x16_t __s2_829 = __p2_829; \
-  int32x2_t __ret_829; \
-uint8x16_t __reint_829 = __s2_829; \
-  __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \
-  __ret_829; \
+#define vsudot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  int32x2_t __s0_845 = __p0_845; \
+  int8x8_t __s1_845 = __p1_845; \
+  uint8x16_t __s2_845 = __p2_845; \
+  int32x2_t __ret_845; \
+uint8x16_t __reint_845 = __s2_845; \
+  __ret_845 = vusdot_s32(__s0_845, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845)), __s1_845); \
+  __ret_845; \
 })
 #else
-#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32x2_t __s0_830 = __p0_830; \
-  int8x8_t __s1_830 = __p1_830; \
-  uint8x16_t __s2_830 = __p2_830; \
-  int32x2_t __rev0_830;  __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \
-  int8x8_t __rev1_830;  __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_830; \
-uint8x16_t __reint_830 = __rev2_830; \
-  __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \
-  __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \
-  __ret_830; \
+#define vsudot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  int32x2_t __s0_846 = __p0_846; \
+  int8x8_t __s1_846 = __p1_846; \
+  uint8x16_t __s2_846 = __p2_846; \
+  int32x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
+  int8x8_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_846; \
+uint8x16_t __reint_846 = __rev2_846; \
+  __ret_846 = __noswap_vusdot_s32(__rev0_846, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846)), __rev1_846); \
+  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
+  __ret_846; \
 })
 #endif
 
@@ -65155,54 +65343,54 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int32x4_t __s0_831 = __p0_831; \
-  uint8x16_t __s1_831 = __p1_831; \
-  int8x16_t __s2_831 = __p2_831; \
-  int32x4_t __ret_831; \
-int8x16_t __reint_831 = __s2_831; \
-  __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \
-  __ret_831; \
+#define vusdotq_laneq_s32(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
+  int32x4_t __s0_847 = __p0_847; \
+  uint8x16_t __s1_847 = __p1_847; \
+  int8x16_t __s2_847 = __p2_847; \
+  int32x4_t __ret_847; \
+int8x16_t __reint_847 = __s2_847; \
+  __ret_847 = vusdotq_s32(__s0_847, __s1_847, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_847, __p3_847))); \
+  __ret_847; \
 })
 #else
-#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int32x4_t __s0_832 = __p0_832; \
-  uint8x16_t __s1_832 = __p1_832; \
-  int8x16_t __s2_832 = __p2_832; \
-  int32x4_t __rev0_832;  __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \
-  uint8x16_t __rev1_832;  __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_832; \
-int8x16_t __reint_832 = __rev2_832; \
-  __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \
-  __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \
-  __ret_832; \
+#define vusdotq_laneq_s32(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
+  int32x4_t __s0_848 = __p0_848; \
+  uint8x16_t __s1_848 = __p1_848; \
+  int8x16_t __s2_848 = __p2_848; \
+  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
+  uint8x16_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_848; \
+int8x16_t __reint_848 = __rev2_848; \
+  __ret_848 = __noswap_vusdotq_s32(__rev0_848, __rev1_848, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_848, __p3_848))); \
+  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
+  __ret_848; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  int32x2_t __s0_833 = __p0_833; \
-  uint8x8_t __s1_833 = __p1_833; \
-  int8x16_t __s2_833 = __p2_833; \
-  int32x2_t __ret_833; \
-int8x16_t __reint_833 = __s2_833; \
-  __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \
-  __ret_833; \
+#define vusdot_laneq_s32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
+  int32x2_t __s0_849 = __p0_849; \
+  uint8x8_t __s1_849 = __p1_849; \
+  int8x16_t __s2_849 = __p2_849; \
+  int32x2_t __ret_849; \
+int8x16_t __reint_849 = __s2_849; \
+  __ret_849 = vusdot_s32(__s0_849, __s1_849, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_849, __p3_849))); \
+  __ret_849; \
 })
 #else
-#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  int32x2_t __s0_834 = __p0_834; \
-  uint8x8_t __s1_834 = __p1_834; \
-  int8x16_t __s2_834 = __p2_834; \
-  int32x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  uint8x8_t __rev1_834;  __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_834;  __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_834; \
-int8x16_t __reint_834 = __rev2_834; \
-  __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
+#define vusdot_laneq_s32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
+  int32x2_t __s0_850 = __p0_850; \
+  uint8x8_t __s1_850 = __p1_850; \
+  int8x16_t __s2_850 = __p2_850; \
+  int32x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
+  uint8x8_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_850; \
+int8x16_t __reint_850 = __rev2_850; \
+  __ret_850 = __noswap_vusdot_s32(__rev0_850, __rev1_850, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_850, __p3_850))); \
+  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
+  __ret_850; \
 })
 #endif
 
@@ -67256,60 +67444,60 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \
-  float16x4_t __s0_835 = __p0_835; \
-  float16_t __ret_835; \
-float16x4_t __reint_835 = __s0_835; \
-int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \
-  __ret_835 = *(float16_t *) &__reint1_835; \
-  __ret_835; \
+#define vget_lane_f16(__p0_851, __p1_851) __extension__ ({ \
+  float16x4_t __s0_851 = __p0_851; \
+  float16_t __ret_851; \
+float16x4_t __reint_851 = __s0_851; \
+int16_t __reint1_851 = vget_lane_s16(*(int16x4_t *) &__reint_851, __p1_851); \
+  __ret_851 = *(float16_t *) &__reint1_851; \
+  __ret_851; \
 })
 #else
-#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \
-  float16x4_t __s0_836 = __p0_836; \
-  float16x4_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \
-  float16_t __ret_836; \
-float16x4_t __reint_836 = __rev0_836; \
-int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \
-  __ret_836 = *(float16_t *) &__reint1_836; \
-  __ret_836; \
+#define vget_lane_f16(__p0_852, __p1_852) __extension__ ({ \
+  float16x4_t __s0_852 = __p0_852; \
+  float16x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
+  float16_t __ret_852; \
+float16x4_t __reint_852 = __rev0_852; \
+int16_t __reint1_852 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_852, __p1_852); \
+  __ret_852 = *(float16_t *) &__reint1_852; \
+  __ret_852; \
 })
-#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \
-  float16x4_t __s0_837 = __p0_837; \
-  float16_t __ret_837; \
-float16x4_t __reint_837 = __s0_837; \
-int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \
-  __ret_837 = *(float16_t *) &__reint1_837; \
-  __ret_837; \
+#define __noswap_vget_lane_f16(__p0_853, __p1_853) __extension__ ({ \
+  float16x4_t __s0_853 = __p0_853; \
+  float16_t __ret_853; \
+float16x4_t __reint_853 = __s0_853; \
+int16_t __reint1_853 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_853, __p1_853); \
+  __ret_853 = *(float16_t *) &__reint1_853; \
+  __ret_853; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \
-  float16x8_t __s0_838 = __p0_838; \
-  float16_t __ret_838; \
-float16x8_t __reint_838 = __s0_838; \
-int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \
-  __ret_838 = *(float16_t *) &__reint1_838; \
-  __ret_838; \
+#define vgetq_lane_f16(__p0_854, __p1_854) __extension__ ({ \
+  float16x8_t __s0_854 = __p0_854; \
+  float16_t __ret_854; \
+float16x8_t __reint_854 = __s0_854; \
+int16_t __reint1_854 = vgetq_lane_s16(*(int16x8_t *) &__reint_854, __p1_854); \
+  __ret_854 = *(float16_t *) &__reint1_854; \
+  __ret_854; \
 })
 #else
-#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \
-  float16x8_t __s0_839 = __p0_839; \
-  float16x8_t __rev0_839;  __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_839; \
-float16x8_t __reint_839 = __rev0_839; \
-int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \
-  __ret_839 = *(float16_t *) &__reint1_839; \
-  __ret_839; \
+#define vgetq_lane_f16(__p0_855, __p1_855) __extension__ ({ \
+  float16x8_t __s0_855 = __p0_855; \
+  float16x8_t __rev0_855;  __rev0_855 = __builtin_shufflevector(__s0_855, __s0_855, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_855; \
+float16x8_t __reint_855 = __rev0_855; \
+int16_t __reint1_855 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_855, __p1_855); \
+  __ret_855 = *(float16_t *) &__reint1_855; \
+  __ret_855; \
 })
-#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \
-  float16x8_t __s0_840 = __p0_840; \
-  float16_t __ret_840; \
-float16x8_t __reint_840 = __s0_840; \
-int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \
-  __ret_840 = *(float16_t *) &__reint1_840; \
-  __ret_840; \
+#define __noswap_vgetq_lane_f16(__p0_856, __p1_856) __extension__ ({ \
+  float16x8_t __s0_856 = __p0_856; \
+  float16_t __ret_856; \
+float16x8_t __reint_856 = __s0_856; \
+int16_t __reint1_856 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_856, __p1_856); \
+  __ret_856 = *(float16_t *) &__reint1_856; \
+  __ret_856; \
 })
 #endif
 
@@ -67452,98 +67640,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  uint64x2_t __s0_841 = __p0_841; \
-  uint32x2_t __s1_841 = __p1_841; \
-  uint32x2_t __s2_841 = __p2_841; \
-  uint64x2_t __ret_841; \
-  __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \
-  __ret_841; \
+#define vmlal_lane_u32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \
+  uint64x2_t __s0_857 = __p0_857; \
+  uint32x2_t __s1_857 = __p1_857; \
+  uint32x2_t __s2_857 = __p2_857; \
+  uint64x2_t __ret_857; \
+  __ret_857 = __s0_857 + vmull_u32(__s1_857, splat_lane_u32(__s2_857, __p3_857)); \
+  __ret_857; \
 })
 #else
-#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  uint64x2_t __s0_842 = __p0_842; \
-  uint32x2_t __s1_842 = __p1_842; \
-  uint32x2_t __s2_842 = __p2_842; \
-  uint64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  uint32x2_t __rev1_842;  __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \
-  uint32x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  uint64x2_t __ret_842; \
-  __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
+#define vmlal_lane_u32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \
+  uint64x2_t __s0_858 = __p0_858; \
+  uint32x2_t __s1_858 = __p1_858; \
+  uint32x2_t __s2_858 = __p2_858; \
+  uint64x2_t __rev0_858;  __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \
+  uint32x2_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \
+  uint32x2_t __rev2_858;  __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \
+  uint64x2_t __ret_858; \
+  __ret_858 = __rev0_858 + __noswap_vmull_u32(__rev1_858, __noswap_splat_lane_u32(__rev2_858, __p3_858)); \
+  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \
+  __ret_858; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  uint32x4_t __s0_843 = __p0_843; \
-  uint16x4_t __s1_843 = __p1_843; \
-  uint16x4_t __s2_843 = __p2_843; \
-  uint32x4_t __ret_843; \
-  __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \
-  __ret_843; \
+#define vmlal_lane_u16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \
+  uint32x4_t __s0_859 = __p0_859; \
+  uint16x4_t __s1_859 = __p1_859; \
+  uint16x4_t __s2_859 = __p2_859; \
+  uint32x4_t __ret_859; \
+  __ret_859 = __s0_859 + vmull_u16(__s1_859, splat_lane_u16(__s2_859, __p3_859)); \
+  __ret_859; \
 })
 #else
-#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  uint32x4_t __s0_844 = __p0_844; \
-  uint16x4_t __s1_844 = __p1_844; \
-  uint16x4_t __s2_844 = __p2_844; \
-  uint32x4_t __rev0_844;  __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \
-  uint16x4_t __rev1_844;  __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \
-  uint16x4_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \
-  uint32x4_t __ret_844; \
-  __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \
-  __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \
-  __ret_844; \
+#define vmlal_lane_u16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \
+  uint32x4_t __s0_860 = __p0_860; \
+  uint16x4_t __s1_860 = __p1_860; \
+  uint16x4_t __s2_860 = __p2_860; \
+  uint32x4_t __rev0_860;  __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \
+  uint16x4_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \
+  uint16x4_t __rev2_860;  __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \
+  uint32x4_t __ret_860; \
+  __ret_860 = __rev0_860 + __noswap_vmull_u16(__rev1_860, __noswap_splat_lane_u16(__rev2_860, __p3_860)); \
+  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \
+  __ret_860; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  int64x2_t __s0_845 = __p0_845; \
-  int32x2_t __s1_845 = __p1_845; \
-  int32x2_t __s2_845 = __p2_845; \
-  int64x2_t __ret_845; \
-  __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \
-  __ret_845; \
+#define vmlal_lane_s32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
+  int64x2_t __s0_861 = __p0_861; \
+  int32x2_t __s1_861 = __p1_861; \
+  int32x2_t __s2_861 = __p2_861; \
+  int64x2_t __ret_861; \
+  __ret_861 = __s0_861 + vmull_s32(__s1_861, splat_lane_s32(__s2_861, __p3_861)); \
+  __ret_861; \
 })
 #else
-#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  int64x2_t __s0_846 = __p0_846; \
-  int32x2_t __s1_846 = __p1_846; \
-  int32x2_t __s2_846 = __p2_846; \
-  int64x2_t __rev0_846;  __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \
-  int32x2_t __rev1_846;  __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \
-  int32x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  int64x2_t __ret_846; \
-  __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \
-  __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \
-  __ret_846; \
+#define vmlal_lane_s32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
+  int64x2_t __s0_862 = __p0_862; \
+  int32x2_t __s1_862 = __p1_862; \
+  int32x2_t __s2_862 = __p2_862; \
+  int64x2_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \
+  int32x2_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \
+  int32x2_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \
+  int64x2_t __ret_862; \
+  __ret_862 = __rev0_862 + __noswap_vmull_s32(__rev1_862, __noswap_splat_lane_s32(__rev2_862, __p3_862)); \
+  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \
+  __ret_862; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \
-  int32x4_t __s0_847 = __p0_847; \
-  int16x4_t __s1_847 = __p1_847; \
-  int16x4_t __s2_847 = __p2_847; \
-  int32x4_t __ret_847; \
-  __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \
-  __ret_847; \
+#define vmlal_lane_s16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
+  int32x4_t __s0_863 = __p0_863; \
+  int16x4_t __s1_863 = __p1_863; \
+  int16x4_t __s2_863 = __p2_863; \
+  int32x4_t __ret_863; \
+  __ret_863 = __s0_863 + vmull_s16(__s1_863, splat_lane_s16(__s2_863, __p3_863)); \
+  __ret_863; \
 })
 #else
-#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \
-  int32x4_t __s0_848 = __p0_848; \
-  int16x4_t __s1_848 = __p1_848; \
-  int16x4_t __s2_848 = __p2_848; \
-  int32x4_t __rev0_848;  __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \
-  int16x4_t __rev1_848;  __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \
-  int16x4_t __rev2_848;  __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \
-  int32x4_t __ret_848; \
-  __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \
-  __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \
-  __ret_848; \
+#define vmlal_lane_s16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
+  int32x4_t __s0_864 = __p0_864; \
+  int16x4_t __s1_864 = __p1_864; \
+  int16x4_t __s2_864 = __p2_864; \
+  int32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
+  int16x4_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \
+  int16x4_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \
+  int32x4_t __ret_864; \
+  __ret_864 = __rev0_864 + __noswap_vmull_s16(__rev1_864, __noswap_splat_lane_s16(__rev2_864, __p3_864)); \
+  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
+  __ret_864; \
 })
 #endif
 
@@ -67774,98 +67962,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \
-  uint64x2_t __s0_849 = __p0_849; \
-  uint32x2_t __s1_849 = __p1_849; \
-  uint32x2_t __s2_849 = __p2_849; \
-  uint64x2_t __ret_849; \
-  __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \
-  __ret_849; \
+#define vmlsl_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
+  uint64x2_t __s0_865 = __p0_865; \
+  uint32x2_t __s1_865 = __p1_865; \
+  uint32x2_t __s2_865 = __p2_865; \
+  uint64x2_t __ret_865; \
+  __ret_865 = __s0_865 - vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \
+  __ret_865; \
 })
 #else
-#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \
-  uint64x2_t __s0_850 = __p0_850; \
-  uint32x2_t __s1_850 = __p1_850; \
-  uint32x2_t __s2_850 = __p2_850; \
-  uint64x2_t __rev0_850;  __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \
-  uint32x2_t __rev1_850;  __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \
-  uint32x2_t __rev2_850;  __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \
-  uint64x2_t __ret_850; \
-  __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \
-  __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \
-  __ret_850; \
+#define vmlsl_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
+  uint64x2_t __s0_866 = __p0_866; \
+  uint32x2_t __s1_866 = __p1_866; \
+  uint32x2_t __s2_866 = __p2_866; \
+  uint64x2_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \
+  uint32x2_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \
+  uint32x2_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \
+  uint64x2_t __ret_866; \
+  __ret_866 = __rev0_866 - __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \
+  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \
+  __ret_866; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \
-  uint32x4_t __s0_851 = __p0_851; \
-  uint16x4_t __s1_851 = __p1_851; \
-  uint16x4_t __s2_851 = __p2_851; \
-  uint32x4_t __ret_851; \
-  __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \
-  __ret_851; \
+#define vmlsl_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
+  uint32x4_t __s0_867 = __p0_867; \
+  uint16x4_t __s1_867 = __p1_867; \
+  uint16x4_t __s2_867 = __p2_867; \
+  uint32x4_t __ret_867; \
+  __ret_867 = __s0_867 - vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \
+  __ret_867; \
 })
 #else
-#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \
-  uint32x4_t __s0_852 = __p0_852; \
-  uint16x4_t __s1_852 = __p1_852; \
-  uint16x4_t __s2_852 = __p2_852; \
-  uint32x4_t __rev0_852;  __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \
-  uint16x4_t __rev1_852;  __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \
-  uint16x4_t __rev2_852;  __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \
-  uint32x4_t __ret_852; \
-  __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \
-  __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \
-  __ret_852; \
+#define vmlsl_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
+  uint32x4_t __s0_868 = __p0_868; \
+  uint16x4_t __s1_868 = __p1_868; \
+  uint16x4_t __s2_868 = __p2_868; \
+  uint32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
+  uint16x4_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \
+  uint16x4_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \
+  uint32x4_t __ret_868; \
+  __ret_868 = __rev0_868 - __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \
+  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
+  __ret_868; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
-  int64x2_t __s0_853 = __p0_853; \
-  int32x2_t __s1_853 = __p1_853; \
-  int32x2_t __s2_853 = __p2_853; \
-  int64x2_t __ret_853; \
-  __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \
-  __ret_853; \
+#define vmlsl_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
+  int64x2_t __s0_869 = __p0_869; \
+  int32x2_t __s1_869 = __p1_869; \
+  int32x2_t __s2_869 = __p2_869; \
+  int64x2_t __ret_869; \
+  __ret_869 = __s0_869 - vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \
+  __ret_869; \
 })
 #else
-#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
-  int64x2_t __s0_854 = __p0_854; \
-  int32x2_t __s1_854 = __p1_854; \
-  int32x2_t __s2_854 = __p2_854; \
-  int64x2_t __rev0_854;  __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \
-  int32x2_t __rev1_854;  __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \
-  int32x2_t __rev2_854;  __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \
-  int64x2_t __ret_854; \
-  __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \
-  __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \
-  __ret_854; \
+#define vmlsl_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
+  int64x2_t __s0_870 = __p0_870; \
+  int32x2_t __s1_870 = __p1_870; \
+  int32x2_t __s2_870 = __p2_870; \
+  int64x2_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \
+  int32x2_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \
+  int32x2_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \
+  int64x2_t __ret_870; \
+  __ret_870 = __rev0_870 - __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \
+  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \
+  __ret_870; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
-  int32x4_t __s0_855 = __p0_855; \
-  int16x4_t __s1_855 = __p1_855; \
-  int16x4_t __s2_855 = __p2_855; \
-  int32x4_t __ret_855; \
-  __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \
-  __ret_855; \
+#define vmlsl_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
+  int32x4_t __s0_871 = __p0_871; \
+  int16x4_t __s1_871 = __p1_871; \
+  int16x4_t __s2_871 = __p2_871; \
+  int32x4_t __ret_871; \
+  __ret_871 = __s0_871 - vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \
+  __ret_871; \
 })
 #else
-#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
-  int32x4_t __s0_856 = __p0_856; \
-  int16x4_t __s1_856 = __p1_856; \
-  int16x4_t __s2_856 = __p2_856; \
-  int32x4_t __rev0_856;  __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \
-  int16x4_t __rev1_856;  __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \
-  int16x4_t __rev2_856;  __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \
-  int32x4_t __ret_856; \
-  __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \
-  __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \
-  __ret_856; \
+#define vmlsl_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
+  int32x4_t __s0_872 = __p0_872; \
+  int16x4_t __s1_872 = __p1_872; \
+  int16x4_t __s2_872 = __p2_872; \
+  int32x4_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \
+  int16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
+  int16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
+  int32x4_t __ret_872; \
+  __ret_872 = __rev0_872 - __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \
+  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \
+  __ret_872; \
 })
 #endif
 
@@ -67958,151 +68146,151 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \
-  float16_t __s0_857 = __p0_857; \
-  float16x4_t __s1_857 = __p1_857; \
-  float16x4_t __ret_857; \
-float16_t __reint_857 = __s0_857; \
-float16x4_t __reint1_857 = __s1_857; \
-int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \
-  __ret_857 = *(float16x4_t *) &__reint2_857; \
-  __ret_857; \
+#define vset_lane_f16(__p0_873, __p1_873, __p2_873) __extension__ ({ \
+  float16_t __s0_873 = __p0_873; \
+  float16x4_t __s1_873 = __p1_873; \
+  float16x4_t __ret_873; \
+float16_t __reint_873 = __s0_873; \
+float16x4_t __reint1_873 = __s1_873; \
+int16x4_t __reint2_873 = vset_lane_s16(*(int16_t *) &__reint_873, *(int16x4_t *) &__reint1_873, __p2_873); \
+  __ret_873 = *(float16x4_t *) &__reint2_873; \
+  __ret_873; \
 })
 #else
-#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \
-  float16_t __s0_858 = __p0_858; \
-  float16x4_t __s1_858 = __p1_858; \
-  float16x4_t __rev1_858;  __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \
-  float16x4_t __ret_858; \
-float16_t __reint_858 = __s0_858; \
-float16x4_t __reint1_858 = __rev1_858; \
-int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \
-  __ret_858 = *(float16x4_t *) &__reint2_858; \
-  __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \
-  __ret_858; \
+#define vset_lane_f16(__p0_874, __p1_874, __p2_874) __extension__ ({ \
+  float16_t __s0_874 = __p0_874; \
+  float16x4_t __s1_874 = __p1_874; \
+  float16x4_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 3, 2, 1, 0); \
+  float16x4_t __ret_874; \
+float16_t __reint_874 = __s0_874; \
+float16x4_t __reint1_874 = __rev1_874; \
+int16x4_t __reint2_874 = __noswap_vset_lane_s16(*(int16_t *) &__reint_874, *(int16x4_t *) &__reint1_874, __p2_874); \
+  __ret_874 = *(float16x4_t *) &__reint2_874; \
+  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
+  __ret_874; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \
-  float16_t __s0_859 = __p0_859; \
-  float16x8_t __s1_859 = __p1_859; \
-  float16x8_t __ret_859; \
-float16_t __reint_859 = __s0_859; \
-float16x8_t __reint1_859 = __s1_859; \
-int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \
-  __ret_859 = *(float16x8_t *) &__reint2_859; \
-  __ret_859; \
+#define vsetq_lane_f16(__p0_875, __p1_875, __p2_875) __extension__ ({ \
+  float16_t __s0_875 = __p0_875; \
+  float16x8_t __s1_875 = __p1_875; \
+  float16x8_t __ret_875; \
+float16_t __reint_875 = __s0_875; \
+float16x8_t __reint1_875 = __s1_875; \
+int16x8_t __reint2_875 = vsetq_lane_s16(*(int16_t *) &__reint_875, *(int16x8_t *) &__reint1_875, __p2_875); \
+  __ret_875 = *(float16x8_t *) &__reint2_875; \
+  __ret_875; \
 })
 #else
-#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \
-  float16_t __s0_860 = __p0_860; \
-  float16x8_t __s1_860 = __p1_860; \
-  float16x8_t __rev1_860;  __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_860; \
-float16_t __reint_860 = __s0_860; \
-float16x8_t __reint1_860 = __rev1_860; \
-int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \
-  __ret_860 = *(float16x8_t *) &__reint2_860; \
-  __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_860; \
+#define vsetq_lane_f16(__p0_876, __p1_876, __p2_876) __extension__ ({ \
+  float16_t __s0_876 = __p0_876; \
+  float16x8_t __s1_876 = __p1_876; \
+  float16x8_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_876; \
+float16_t __reint_876 = __s0_876; \
+float16x8_t __reint1_876 = __rev1_876; \
+int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(int16x8_t *) &__reint1_876, __p2_876); \
+  __ret_876 = *(float16x8_t *) &__reint2_876; \
+  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_876; \
 })
 #endif
 
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
-  float32x4_t __s0_861 = __p0_861; \
-  bfloat16x8_t __s1_861 = __p1_861; \
-  bfloat16x4_t __s2_861 = __p2_861; \
-  float32x4_t __ret_861; \
-  __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \
-  __ret_861; \
+#define vbfmlalbq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
+  float32x4_t __s0_877 = __p0_877; \
+  bfloat16x8_t __s1_877 = __p1_877; \
+  bfloat16x4_t __s2_877 = __p2_877; \
+  float32x4_t __ret_877; \
+  __ret_877 = vbfmlalbq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \
+  __ret_877; \
 })
 #else
-#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
-  float32x4_t __s0_862 = __p0_862; \
-  bfloat16x8_t __s1_862 = __p1_862; \
-  bfloat16x4_t __s2_862 = __p2_862; \
-  float32x4_t __rev0_862;  __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_862;  __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_862;  __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \
-  float32x4_t __ret_862; \
-  __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \
-  __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \
-  __ret_862; \
+#define vbfmlalbq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
+  float32x4_t __s0_878 = __p0_878; \
+  bfloat16x8_t __s1_878 = __p1_878; \
+  bfloat16x4_t __s2_878 = __p2_878; \
+  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \
+  float32x4_t __ret_878; \
+  __ret_878 = __noswap_vbfmlalbq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \
+  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
+  __ret_878; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
-  float32x4_t __s0_863 = __p0_863; \
-  bfloat16x8_t __s1_863 = __p1_863; \
-  bfloat16x8_t __s2_863 = __p2_863; \
-  float32x4_t __ret_863; \
-  __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \
-  __ret_863; \
+#define vbfmlalbq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
+  float32x4_t __s0_879 = __p0_879; \
+  bfloat16x8_t __s1_879 = __p1_879; \
+  bfloat16x8_t __s2_879 = __p2_879; \
+  float32x4_t __ret_879; \
+  __ret_879 = vbfmlalbq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \
+  __ret_879; \
 })
 #else
-#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
-  float32x4_t __s0_864 = __p0_864; \
-  bfloat16x8_t __s1_864 = __p1_864; \
-  bfloat16x8_t __s2_864 = __p2_864; \
-  float32x4_t __rev0_864;  __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_864;  __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_864;  __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_864; \
-  __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \
-  __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \
-  __ret_864; \
+#define vbfmlalbq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
+  float32x4_t __s0_880 = __p0_880; \
+  bfloat16x8_t __s1_880 = __p1_880; \
+  bfloat16x8_t __s2_880 = __p2_880; \
+  float32x4_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_880; \
+  __ret_880 = __noswap_vbfmlalbq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \
+  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \
+  __ret_880; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
-  float32x4_t __s0_865 = __p0_865; \
-  bfloat16x8_t __s1_865 = __p1_865; \
-  bfloat16x4_t __s2_865 = __p2_865; \
-  float32x4_t __ret_865; \
-  __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865)}); \
-  __ret_865; \
+#define vbfmlaltq_lane_f32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
+  float32x4_t __s0_881 = __p0_881; \
+  bfloat16x8_t __s1_881 = __p1_881; \
+  bfloat16x4_t __s2_881 = __p2_881; \
+  float32x4_t __ret_881; \
+  __ret_881 = vbfmlaltq_f32(__s0_881, __s1_881, (bfloat16x8_t) {vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881)}); \
+  __ret_881; \
 })
 #else
-#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
-  float32x4_t __s0_866 = __p0_866; \
-  bfloat16x8_t __s1_866 = __p1_866; \
-  bfloat16x4_t __s2_866 = __p2_866; \
-  float32x4_t __rev0_866;  __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_866;  __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_866;  __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \
-  float32x4_t __ret_866; \
-  __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \
-  __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \
-  __ret_866; \
+#define vbfmlaltq_lane_f32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
+  float32x4_t __s0_882 = __p0_882; \
+  bfloat16x8_t __s1_882 = __p1_882; \
+  bfloat16x4_t __s2_882 = __p2_882; \
+  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \
+  float32x4_t __ret_882; \
+  __ret_882 = __noswap_vbfmlaltq_f32(__rev0_882, __rev1_882, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882)}); \
+  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
+  __ret_882; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
-  float32x4_t __s0_867 = __p0_867; \
-  bfloat16x8_t __s1_867 = __p1_867; \
-  bfloat16x8_t __s2_867 = __p2_867; \
-  float32x4_t __ret_867; \
-  __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \
-  __ret_867; \
+#define vbfmlaltq_laneq_f32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
+  float32x4_t __s0_883 = __p0_883; \
+  bfloat16x8_t __s1_883 = __p1_883; \
+  bfloat16x8_t __s2_883 = __p2_883; \
+  float32x4_t __ret_883; \
+  __ret_883 = vbfmlaltq_f32(__s0_883, __s1_883, (bfloat16x8_t) {vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883)}); \
+  __ret_883; \
 })
 #else
-#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
-  float32x4_t __s0_868 = __p0_868; \
-  bfloat16x8_t __s1_868 = __p1_868; \
-  bfloat16x8_t __s2_868 = __p2_868; \
-  float32x4_t __rev0_868;  __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_868;  __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_868;  __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_868; \
-  __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \
-  __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \
-  __ret_868; \
+#define vbfmlaltq_laneq_f32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
+  float32x4_t __s0_884 = __p0_884; \
+  bfloat16x8_t __s1_884 = __p1_884; \
+  bfloat16x8_t __s2_884 = __p2_884; \
+  float32x4_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_884; \
+  __ret_884 = __noswap_vbfmlaltq_f32(__rev0_884, __rev1_884, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884)}); \
+  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 3, 2, 1, 0); \
+  __ret_884; \
 })
 #endif
 
@@ -68141,208 +68329,16 @@
 #endif
 #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \
-  float32x4_t __s0_869 = __p0_869; \
-  float16x8_t __s1_869 = __p1_869; \
-  float16x4_t __s2_869 = __p2_869; \
-  float32x4_t __ret_869; \
-  __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \
-  __ret_869; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \
-  float32x4_t __s0_870 = __p0_870; \
-  float16x8_t __s1_870 = __p1_870; \
-  float16x4_t __s2_870 = __p2_870; \
-  float32x4_t __rev0_870;  __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \
-  float16x8_t __rev1_870;  __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_870;  __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \
-  float32x4_t __ret_870; \
-  __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \
-  __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \
-  __ret_870; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \
-  float32x2_t __s0_871 = __p0_871; \
-  float16x4_t __s1_871 = __p1_871; \
-  float16x4_t __s2_871 = __p2_871; \
-  float32x2_t __ret_871; \
-  __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \
-  __ret_871; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \
-  float32x2_t __s0_872 = __p0_872; \
-  float16x4_t __s1_872 = __p1_872; \
-  float16x4_t __s2_872 = __p2_872; \
-  float32x2_t __rev0_872;  __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \
-  float16x4_t __rev1_872;  __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \
-  float16x4_t __rev2_872;  __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \
-  float32x2_t __ret_872; \
-  __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \
-  __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \
-  __ret_872; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
-  float32x4_t __s0_873 = __p0_873; \
-  float16x8_t __s1_873 = __p1_873; \
-  float16x4_t __s2_873 = __p2_873; \
-  float32x4_t __ret_873; \
-  __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
-  __ret_873; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
-  float32x4_t __s0_874 = __p0_874; \
-  float16x8_t __s1_874 = __p1_874; \
-  float16x4_t __s2_874 = __p2_874; \
-  float32x4_t __rev0_874;  __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \
-  float16x8_t __rev1_874;  __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_874;  __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \
-  float32x4_t __ret_874; \
-  __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
-  __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \
-  __ret_874; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
-  float32x2_t __s0_875 = __p0_875; \
-  float16x4_t __s1_875 = __p1_875; \
-  float16x4_t __s2_875 = __p2_875; \
-  float32x2_t __ret_875; \
-  __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
-  __ret_875; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
-  float32x2_t __s0_876 = __p0_876; \
-  float16x4_t __s1_876 = __p1_876; \
-  float16x4_t __s2_876 = __p2_876; \
-  float32x2_t __rev0_876;  __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \
-  float16x4_t __rev1_876;  __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \
-  float16x4_t __rev2_876;  __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \
-  float32x2_t __ret_876; \
-  __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
-  __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \
-  __ret_876; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
-  float32x4_t __s0_877 = __p0_877; \
-  float16x8_t __s1_877 = __p1_877; \
-  float16x8_t __s2_877 = __p2_877; \
-  float32x4_t __ret_877; \
-  __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \
-  __ret_877; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
-  float32x4_t __s0_878 = __p0_878; \
-  float16x8_t __s1_878 = __p1_878; \
-  float16x8_t __s2_878 = __p2_878; \
-  float32x4_t __rev0_878;  __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \
-  float16x8_t __rev1_878;  __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_878;  __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_878; \
-  __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \
-  __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \
-  __ret_878; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
-  float32x2_t __s0_879 = __p0_879; \
-  float16x4_t __s1_879 = __p1_879; \
-  float16x8_t __s2_879 = __p2_879; \
-  float32x2_t __ret_879; \
-  __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \
-  __ret_879; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
-  float32x2_t __s0_880 = __p0_880; \
-  float16x4_t __s1_880 = __p1_880; \
-  float16x8_t __s2_880 = __p2_880; \
-  float32x2_t __rev0_880;  __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \
-  float16x4_t __rev1_880;  __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \
-  float16x8_t __rev2_880;  __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_880; \
-  __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \
-  __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \
-  __ret_880; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
-  float32x4_t __s0_881 = __p0_881; \
-  float16x8_t __s1_881 = __p1_881; \
-  float16x8_t __s2_881 = __p2_881; \
-  float32x4_t __ret_881; \
-  __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
-  __ret_881; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
-  float32x4_t __s0_882 = __p0_882; \
-  float16x8_t __s1_882 = __p1_882; \
-  float16x8_t __s2_882 = __p2_882; \
-  float32x4_t __rev0_882;  __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \
-  float16x8_t __rev1_882;  __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_882;  __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_882; \
-  __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
-  __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \
-  __ret_882; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
-  float32x2_t __s0_883 = __p0_883; \
-  float16x4_t __s1_883 = __p1_883; \
-  float16x8_t __s2_883 = __p2_883; \
-  float32x2_t __ret_883; \
-  __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
-  __ret_883; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
-  float32x2_t __s0_884 = __p0_884; \
-  float16x4_t __s1_884 = __p1_884; \
-  float16x8_t __s2_884 = __p2_884; \
-  float32x2_t __rev0_884;  __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \
-  float16x4_t __rev1_884;  __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \
-  float16x8_t __rev2_884;  __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_884; \
-  __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
-  __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \
-  __ret_884; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
   float32x4_t __s0_885 = __p0_885; \
   float16x8_t __s1_885 = __p1_885; \
   float16x4_t __s2_885 = __p2_885; \
   float32x4_t __ret_885; \
-  __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
+  __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \
   __ret_885; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
+#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
   float32x4_t __s0_886 = __p0_886; \
   float16x8_t __s1_886 = __p1_886; \
   float16x4_t __s2_886 = __p2_886; \
@@ -68350,23 +68346,23 @@
   float16x8_t __rev1_886;  __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_886;  __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \
   float32x4_t __ret_886; \
-  __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
+  __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \
   __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \
   __ret_886; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
   float32x2_t __s0_887 = __p0_887; \
   float16x4_t __s1_887 = __p1_887; \
   float16x4_t __s2_887 = __p2_887; \
   float32x2_t __ret_887; \
-  __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
+  __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \
   __ret_887; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
+#define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
   float32x2_t __s0_888 = __p0_888; \
   float16x4_t __s1_888 = __p1_888; \
   float16x4_t __s2_888 = __p2_888; \
@@ -68374,23 +68370,23 @@
   float16x4_t __rev1_888;  __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \
   float16x4_t __rev2_888;  __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \
   float32x2_t __ret_888; \
-  __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
+  __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \
   __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \
   __ret_888; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
   float32x4_t __s0_889 = __p0_889; \
   float16x8_t __s1_889 = __p1_889; \
   float16x4_t __s2_889 = __p2_889; \
   float32x4_t __ret_889; \
-  __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
+  __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
   __ret_889; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
+#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
   float32x4_t __s0_890 = __p0_890; \
   float16x8_t __s1_890 = __p1_890; \
   float16x4_t __s2_890 = __p2_890; \
@@ -68398,23 +68394,23 @@
   float16x8_t __rev1_890;  __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x4_t __rev2_890;  __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \
   float32x4_t __ret_890; \
-  __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
+  __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
   __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \
   __ret_890; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
   float32x2_t __s0_891 = __p0_891; \
   float16x4_t __s1_891 = __p1_891; \
   float16x4_t __s2_891 = __p2_891; \
   float32x2_t __ret_891; \
-  __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
+  __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
   __ret_891; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
+#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
   float32x2_t __s0_892 = __p0_892; \
   float16x4_t __s1_892 = __p1_892; \
   float16x4_t __s2_892 = __p2_892; \
@@ -68422,23 +68418,23 @@
   float16x4_t __rev1_892;  __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \
   float16x4_t __rev2_892;  __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \
   float32x2_t __ret_892; \
-  __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
+  __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
   __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \
   __ret_892; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
   float32x4_t __s0_893 = __p0_893; \
   float16x8_t __s1_893 = __p1_893; \
   float16x8_t __s2_893 = __p2_893; \
   float32x4_t __ret_893; \
-  __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
+  __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \
   __ret_893; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
+#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
   float32x4_t __s0_894 = __p0_894; \
   float16x8_t __s1_894 = __p1_894; \
   float16x8_t __s2_894 = __p2_894; \
@@ -68446,23 +68442,23 @@
   float16x8_t __rev1_894;  __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_894;  __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x4_t __ret_894; \
-  __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
+  __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \
   __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \
   __ret_894; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
   float32x2_t __s0_895 = __p0_895; \
   float16x4_t __s1_895 = __p1_895; \
   float16x8_t __s2_895 = __p2_895; \
   float32x2_t __ret_895; \
-  __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
+  __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \
   __ret_895; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
+#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
   float32x2_t __s0_896 = __p0_896; \
   float16x4_t __s1_896 = __p1_896; \
   float16x8_t __s2_896 = __p2_896; \
@@ -68470,23 +68466,23 @@
   float16x4_t __rev1_896;  __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \
   float16x8_t __rev2_896;  __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x2_t __ret_896; \
-  __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
+  __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \
   __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \
   __ret_896; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
   float32x4_t __s0_897 = __p0_897; \
   float16x8_t __s1_897 = __p1_897; \
   float16x8_t __s2_897 = __p2_897; \
   float32x4_t __ret_897; \
-  __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
+  __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
   __ret_897; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
+#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
   float32x4_t __s0_898 = __p0_898; \
   float16x8_t __s1_898 = __p1_898; \
   float16x8_t __s2_898 = __p2_898; \
@@ -68494,23 +68490,23 @@
   float16x8_t __rev1_898;  __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8_t __rev2_898;  __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x4_t __ret_898; \
-  __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
+  __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
   __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \
   __ret_898; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
   float32x2_t __s0_899 = __p0_899; \
   float16x4_t __s1_899 = __p1_899; \
   float16x8_t __s2_899 = __p2_899; \
   float32x2_t __ret_899; \
-  __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
+  __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
   __ret_899; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
+#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
   float32x2_t __s0_900 = __p0_900; \
   float16x4_t __s1_900 = __p1_900; \
   float16x8_t __s2_900 = __p2_900; \
@@ -68518,292 +68514,294 @@
   float16x4_t __rev1_900;  __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \
   float16x8_t __rev2_900;  __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \
   float32x2_t __ret_900; \
-  __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
+  __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
   __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \
   __ret_900; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \
-  float16_t __s0_901 = __p0_901; \
-  float16x4_t __s1_901 = __p1_901; \
-  float16_t __ret_901; \
-  __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \
+#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \
+  float32x4_t __s0_901 = __p0_901; \
+  float16x8_t __s1_901 = __p1_901; \
+  float16x4_t __s2_901 = __p2_901; \
+  float32x4_t __ret_901; \
+  __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \
   __ret_901; \
 })
 #else
-#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \
-  float16_t __s0_902 = __p0_902; \
-  float16x4_t __s1_902 = __p1_902; \
-  float16x4_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \
-  float16_t __ret_902; \
-  __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \
+#define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \
+  float32x4_t __s0_902 = __p0_902; \
+  float16x8_t __s1_902 = __p1_902; \
+  float16x4_t __s2_902 = __p2_902; \
+  float32x4_t __rev0_902;  __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \
+  float16x8_t __rev1_902;  __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_902;  __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \
+  float32x4_t __ret_902; \
+  __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \
+  __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \
   __ret_902; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \
-  float16_t __s0_903 = __p0_903; \
-  float16x8_t __s1_903 = __p1_903; \
-  float16_t __ret_903; \
-  __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \
+#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \
+  float32x2_t __s0_903 = __p0_903; \
+  float16x4_t __s1_903 = __p1_903; \
+  float16x4_t __s2_903 = __p2_903; \
+  float32x2_t __ret_903; \
+  __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \
   __ret_903; \
 })
 #else
-#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \
-  float16_t __s0_904 = __p0_904; \
-  float16x8_t __s1_904 = __p1_904; \
-  float16x8_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_904; \
-  __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \
+#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \
+  float32x2_t __s0_904 = __p0_904; \
+  float16x4_t __s1_904 = __p1_904; \
+  float16x4_t __s2_904 = __p2_904; \
+  float32x2_t __rev0_904;  __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \
+  float16x4_t __rev1_904;  __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \
+  float16x4_t __rev2_904;  __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \
+  float32x2_t __ret_904; \
+  __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \
+  __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \
   __ret_904; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
-  int32x4_t __s0_905 = __p0_905; \
-  int8x16_t __s1_905 = __p1_905; \
-  uint8x8_t __s2_905 = __p2_905; \
-  int32x4_t __ret_905; \
-uint8x8_t __reint_905 = __s2_905; \
-  __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \
+#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \
+  float32x4_t __s0_905 = __p0_905; \
+  float16x8_t __s1_905 = __p1_905; \
+  float16x4_t __s2_905 = __p2_905; \
+  float32x4_t __ret_905; \
+  __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \
   __ret_905; \
 })
 #else
-#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
-  int32x4_t __s0_906 = __p0_906; \
-  int8x16_t __s1_906 = __p1_906; \
-  uint8x8_t __s2_906 = __p2_906; \
-  int32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
-  int8x16_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_906; \
-uint8x8_t __reint_906 = __rev2_906; \
-  __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \
+#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \
+  float32x4_t __s0_906 = __p0_906; \
+  float16x8_t __s1_906 = __p1_906; \
+  float16x4_t __s2_906 = __p2_906; \
+  float32x4_t __rev0_906;  __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \
+  float16x8_t __rev1_906;  __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_906;  __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \
+  float32x4_t __ret_906; \
+  __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \
   __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \
   __ret_906; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
-  int32x2_t __s0_907 = __p0_907; \
-  int8x8_t __s1_907 = __p1_907; \
-  uint8x8_t __s2_907 = __p2_907; \
-  int32x2_t __ret_907; \
-uint8x8_t __reint_907 = __s2_907; \
-  __ret_907 = vusdot_s32(__s0_907, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \
+#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \
+  float32x2_t __s0_907 = __p0_907; \
+  float16x4_t __s1_907 = __p1_907; \
+  float16x4_t __s2_907 = __p2_907; \
+  float32x2_t __ret_907; \
+  __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \
   __ret_907; \
 })
 #else
-#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
-  int32x2_t __s0_908 = __p0_908; \
-  int8x8_t __s1_908 = __p1_908; \
-  uint8x8_t __s2_908 = __p2_908; \
-  int32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
-  int8x8_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_908; \
-uint8x8_t __reint_908 = __rev2_908; \
-  __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \
+#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \
+  float32x2_t __s0_908 = __p0_908; \
+  float16x4_t __s1_908 = __p1_908; \
+  float16x4_t __s2_908 = __p2_908; \
+  float32x2_t __rev0_908;  __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \
+  float16x4_t __rev1_908;  __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \
+  float16x4_t __rev2_908;  __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \
+  float32x2_t __ret_908; \
+  __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \
   __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \
   __ret_908; \
 })
 #endif
 
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
-  int32_t __s0_909 = __p0_909; \
-  int32_t __s1_909 = __p1_909; \
-  int32x2_t __s2_909 = __p2_909; \
-  int32_t __ret_909; \
-  __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \
+#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
+  float32x4_t __s0_909 = __p0_909; \
+  float16x8_t __s1_909 = __p1_909; \
+  float16x8_t __s2_909 = __p2_909; \
+  float32x4_t __ret_909; \
+  __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \
   __ret_909; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
-  int32_t __s0_910 = __p0_910; \
-  int32_t __s1_910 = __p1_910; \
-  int32x2_t __s2_910 = __p2_910; \
-  int32x2_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \
-  int32_t __ret_910; \
-  __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \
+#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
+  float32x4_t __s0_910 = __p0_910; \
+  float16x8_t __s1_910 = __p1_910; \
+  float16x8_t __s2_910 = __p2_910; \
+  float32x4_t __rev0_910;  __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \
+  float16x8_t __rev1_910;  __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_910;  __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_910; \
+  __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \
+  __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \
   __ret_910; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
-  int16_t __s0_911 = __p0_911; \
-  int16_t __s1_911 = __p1_911; \
-  int16x4_t __s2_911 = __p2_911; \
-  int16_t __ret_911; \
-  __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \
+#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
+  float32x2_t __s0_911 = __p0_911; \
+  float16x4_t __s1_911 = __p1_911; \
+  float16x8_t __s2_911 = __p2_911; \
+  float32x2_t __ret_911; \
+  __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \
   __ret_911; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
-  int16_t __s0_912 = __p0_912; \
-  int16_t __s1_912 = __p1_912; \
-  int16x4_t __s2_912 = __p2_912; \
-  int16x4_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \
-  int16_t __ret_912; \
-  __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \
+#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
+  float32x2_t __s0_912 = __p0_912; \
+  float16x4_t __s1_912 = __p1_912; \
+  float16x8_t __s2_912 = __p2_912; \
+  float32x2_t __rev0_912;  __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \
+  float16x4_t __rev1_912;  __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \
+  float16x8_t __rev2_912;  __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_912; \
+  __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \
+  __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \
   __ret_912; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
-  int32_t __s0_913 = __p0_913; \
-  int32_t __s1_913 = __p1_913; \
-  int32x4_t __s2_913 = __p2_913; \
-  int32_t __ret_913; \
-  __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \
+#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
+  float32x4_t __s0_913 = __p0_913; \
+  float16x8_t __s1_913 = __p1_913; \
+  float16x8_t __s2_913 = __p2_913; \
+  float32x4_t __ret_913; \
+  __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \
   __ret_913; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
-  int32_t __s0_914 = __p0_914; \
-  int32_t __s1_914 = __p1_914; \
-  int32x4_t __s2_914 = __p2_914; \
-  int32x4_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \
-  int32_t __ret_914; \
-  __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \
+#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
+  float32x4_t __s0_914 = __p0_914; \
+  float16x8_t __s1_914 = __p1_914; \
+  float16x8_t __s2_914 = __p2_914; \
+  float32x4_t __rev0_914;  __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \
+  float16x8_t __rev1_914;  __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_914;  __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_914; \
+  __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \
+  __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \
   __ret_914; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
-  int16_t __s0_915 = __p0_915; \
-  int16_t __s1_915 = __p1_915; \
-  int16x8_t __s2_915 = __p2_915; \
-  int16_t __ret_915; \
-  __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \
+#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
+  float32x2_t __s0_915 = __p0_915; \
+  float16x4_t __s1_915 = __p1_915; \
+  float16x8_t __s2_915 = __p2_915; \
+  float32x2_t __ret_915; \
+  __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \
   __ret_915; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
-  int16_t __s0_916 = __p0_916; \
-  int16_t __s1_916 = __p1_916; \
-  int16x8_t __s2_916 = __p2_916; \
-  int16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_916; \
-  __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \
+#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
+  float32x2_t __s0_916 = __p0_916; \
+  float16x4_t __s1_916 = __p1_916; \
+  float16x8_t __s2_916 = __p2_916; \
+  float32x2_t __rev0_916;  __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \
+  float16x4_t __rev1_916;  __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \
+  float16x8_t __rev2_916;  __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_916; \
+  __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \
+  __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \
   __ret_916; \
 })
 #endif
 
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
-  int32_t __s0_917 = __p0_917; \
-  int32_t __s1_917 = __p1_917; \
-  int32x2_t __s2_917 = __p2_917; \
-  int32_t __ret_917; \
-  __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \
+#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \
+  float16_t __s0_917 = __p0_917; \
+  float16x4_t __s1_917 = __p1_917; \
+  float16_t __ret_917; \
+  __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \
   __ret_917; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
-  int32_t __s0_918 = __p0_918; \
-  int32_t __s1_918 = __p1_918; \
-  int32x2_t __s2_918 = __p2_918; \
-  int32x2_t __rev2_918;  __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \
-  int32_t __ret_918; \
-  __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \
+#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \
+  float16_t __s0_918 = __p0_918; \
+  float16x4_t __s1_918 = __p1_918; \
+  float16x4_t __rev1_918;  __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \
+  float16_t __ret_918; \
+  __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \
   __ret_918; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
-  int16_t __s0_919 = __p0_919; \
-  int16_t __s1_919 = __p1_919; \
-  int16x4_t __s2_919 = __p2_919; \
-  int16_t __ret_919; \
-  __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \
+#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \
+  float16_t __s0_919 = __p0_919; \
+  float16x8_t __s1_919 = __p1_919; \
+  float16_t __ret_919; \
+  __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \
   __ret_919; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
-  int16_t __s0_920 = __p0_920; \
-  int16_t __s1_920 = __p1_920; \
-  int16x4_t __s2_920 = __p2_920; \
-  int16x4_t __rev2_920;  __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \
-  int16_t __ret_920; \
-  __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \
+#define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \
+  float16_t __s0_920 = __p0_920; \
+  float16x8_t __s1_920 = __p1_920; \
+  float16x8_t __rev1_920;  __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_920; \
+  __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \
   __ret_920; \
 })
 #endif
 
+#endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
-  int32_t __s0_921 = __p0_921; \
-  int32_t __s1_921 = __p1_921; \
-  int32x4_t __s2_921 = __p2_921; \
-  int32_t __ret_921; \
-  __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \
+#define vsudotq_lane_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
+  int32x4_t __s0_921 = __p0_921; \
+  int8x16_t __s1_921 = __p1_921; \
+  uint8x8_t __s2_921 = __p2_921; \
+  int32x4_t __ret_921; \
+uint8x8_t __reint_921 = __s2_921; \
+  __ret_921 = vusdotq_s32(__s0_921, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_921, __p3_921)), __s1_921); \
   __ret_921; \
 })
 #else
-#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
-  int32_t __s0_922 = __p0_922; \
-  int32_t __s1_922 = __p1_922; \
-  int32x4_t __s2_922 = __p2_922; \
-  int32x4_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 1, 0); \
-  int32_t __ret_922; \
-  __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \
+#define vsudotq_lane_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
+  int32x4_t __s0_922 = __p0_922; \
+  int8x16_t __s1_922 = __p1_922; \
+  uint8x8_t __s2_922 = __p2_922; \
+  int32x4_t __rev0_922;  __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 3, 2, 1, 0); \
+  int8x16_t __rev1_922;  __rev1_922 = __builtin_shufflevector(__s1_922, __s1_922, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_922;  __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_922; \
+uint8x8_t __reint_922 = __rev2_922; \
+  __ret_922 = __noswap_vusdotq_s32(__rev0_922, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_922, __p3_922)), __rev1_922); \
+  __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 3, 2, 1, 0); \
   __ret_922; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
-  int16_t __s0_923 = __p0_923; \
-  int16_t __s1_923 = __p1_923; \
-  int16x8_t __s2_923 = __p2_923; \
-  int16_t __ret_923; \
-  __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \
+#define vsudot_lane_s32(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \
+  int32x2_t __s0_923 = __p0_923; \
+  int8x8_t __s1_923 = __p1_923; \
+  uint8x8_t __s2_923 = __p2_923; \
+  int32x2_t __ret_923; \
+uint8x8_t __reint_923 = __s2_923; \
+  __ret_923 = vusdot_s32(__s0_923, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_923, __p3_923)), __s1_923); \
   __ret_923; \
 })
 #else
-#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
-  int16_t __s0_924 = __p0_924; \
-  int16_t __s1_924 = __p1_924; \
-  int16x8_t __s2_924 = __p2_924; \
-  int16x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_924; \
-  __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \
+#define vsudot_lane_s32(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
+  int32x2_t __s0_924 = __p0_924; \
+  int8x8_t __s1_924 = __p1_924; \
+  uint8x8_t __s2_924 = __p2_924; \
+  int32x2_t __rev0_924;  __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \
+  int8x8_t __rev1_924;  __rev1_924 = __builtin_shufflevector(__s1_924, __s1_924, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_924;  __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_924; \
+uint8x8_t __reint_924 = __rev2_924; \
+  __ret_924 = __noswap_vusdot_s32(__rev0_924, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_924, __p3_924)), __rev1_924); \
+  __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \
   __ret_924; \
 })
 #endif
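
(Note: the arm_neon.h hunk above only renumbers the expansion-local identifiers (_869 through _924) and reorders the vfmlal*/vfmlsl* lane macro groups; the intrinsics themselves are unchanged. A minimal usage sketch, assuming an AArch64 target built with -march=armv8.2-a+fp16fml; the function name is illustrative, not part of the header:

#include <arm_neon.h>

/* Widen the high four f16 halves of `a` to f32, multiply each by
 * lane 0 of `b`, and accumulate into `acc` (FMLAL2 by element). */
float32x4_t mla_high_by_lane(float32x4_t acc, float16x8_t a, float16x4_t b) {
  return vfmlalq_lane_high_f16(acc, a, b, 0);
}

/* The vfmlslq_* counterparts subtract the widened products instead. */
)
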
diff --git a/linux-x86/lib64/clang/14.0.2/include/arm_sve.h b/linux-x86/lib64/clang/14.0.6/include/arm_sve.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/arm_sve.h
rename to linux-x86/lib64/clang/14.0.6/include/arm_sve.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/armintr.h b/linux-x86/lib64/clang/14.0.6/include/armintr.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/armintr.h
rename to linux-x86/lib64/clang/14.0.6/include/armintr.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx2intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx2intrin.h
similarity index 97%
rename from linux-x86/lib64/clang/14.0.2/include/avx2intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx2intrin.h
index 5064c87..e33514a 100644
--- a/linux-x86/lib64/clang/14.0.2/include/avx2intrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/avx2intrin.h
@@ -26,19 +26,19 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi8(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi16(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+    return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi32(__m256i __a)
 {
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+    return (__m256i)__builtin_elementwise_abs((__v8si)__a);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -253,73 +253,73 @@
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu8(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+  return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu16(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+  return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu32(__m256i __a, __m256i __b)
 {
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+  return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS256
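The avx2intrin.h changes above are part of a clang 14.0.6-wide cleanup: target-specific __builtin_ia32_p{abs,max,min}* builtins are replaced by the generic __builtin_elementwise_{abs,max,min} builtins, which lower to the same instructions but are visible to target-independent optimizations. Signed versus unsigned semantics now come from the element type of the operand (hence the new __v32qs/__v32qu-style casts) instead of from the builtin's name. A minimal sketch, assuming clang 14 or newer (the typedef names are illustrative):

  typedef signed char   v16qs __attribute__((__vector_size__(16)));
  typedef unsigned char v16qu __attribute__((__vector_size__(16)));

  /* Same generic builtin; the per-lane compare is signed or unsigned
   * depending on the element type. */
  static v16qs smax(v16qs a, v16qs b) {
    return __builtin_elementwise_max(a, b);
  }
  static v16qu umax(v16qu a, v16qu b) {
    return __builtin_elementwise_max(a, b);
  }
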
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512bf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512bf16intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512bf16intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512bitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512bitalgintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
index 6aee8ae..522ef10 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512bwintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/avx512bwintrin.h
@@ -485,7 +485,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi8 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +507,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi16 (__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+  return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +751,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +773,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +796,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +818,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +840,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+  return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +862,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+  return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +884,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu8 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+  return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +906,7 @@
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu16 (__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+  return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512cdintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512cdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512cdintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512cdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512dqintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512dqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512dqintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512dqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512erintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512erintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512erintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512fintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/avx512fintrin.h
index df29864..50e0e28 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512fintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/avx512fintrin.h
@@ -26,6 +26,10 @@
 typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
 typedef unsigned int __v16su __attribute__((__vector_size__(64)));
 
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
 typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
 typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
 typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
@@ -1086,7 +1090,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_max_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1112,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1134,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1156,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_max_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1321,7 +1325,7 @@
 __DEFAULT_FN_ATTRS512
 _mm512_min_epi32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1347,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu32(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+  return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1369,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epi64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1391,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_min_epu64(__m512i __A, __m512i __B)
 {
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+  return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1846,7 +1850,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi64(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+  return (__m512i)__builtin_elementwise_abs((__v8di)__A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1872,7 @@
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_abs_epi32(__m512i __A)
 {
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+  return (__m512i)__builtin_elementwise_abs((__v16si) __A);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9320,11 +9324,11 @@
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
@@ -9342,13 +9346,13 @@
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  return __builtin_ia32_reduce_and_q512(__W);
+  return __builtin_reduce_and((__v8di)__W);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi64(__M, __W);
-  return __builtin_ia32_reduce_or_q512(__W);
+  return __builtin_reduce_or((__v8di)__W);
 }
 
 // -0.0 is used to ignore the start value since it is the neutral value of
@@ -9386,12 +9390,12 @@
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_and_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_or_epi32(__m512i __W) {
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
@@ -9409,13 +9413,13 @@
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
   __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  return __builtin_ia32_reduce_and_d512((__v16si)__W);
+  return __builtin_reduce_and((__v16si)__W);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
   __W = _mm512_maskz_mov_epi32(__M, __W);
-  return __builtin_ia32_reduce_or_d512((__v16si)__W);
+  return __builtin_reduce_or((__v16si)__W);
 }
 
 static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9446,89 @@
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi64(__m512i __V) {
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu64(__m512i __V) {
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  return __builtin_ia32_reduce_smax_q512(__V);
+  return __builtin_reduce_max((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi64(__M, __V);
-  return __builtin_ia32_reduce_umax_q512(__V);
+  return __builtin_reduce_max((__v8du)__V);
 }
 
 static __inline__ long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_q512(__V);
+  return __builtin_reduce_min((__v8di)__V);
 }
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  return __builtin_ia32_reduce_umin_q512(__V);
+  return __builtin_reduce_min((__v8du)__V);
 }
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_max_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epi32(__m512i __V) {
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_reduce_min_epu32(__m512i __V) {
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_maskz_mov_epi32(__M, __V);
-  return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+  return __builtin_reduce_max((__v16su)__V);
 }
 
 static __inline__ int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16si)__V);
 }
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS512
 _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
   __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+  return __builtin_reduce_min((__v16su)__V);
 }
 
 static __inline__ double __DEFAULT_FN_ATTRS512
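Likewise, the avx512fintrin.h reductions above move from __builtin_ia32_reduce_*_{d,q}512 to the generic __builtin_reduce_{and,or,max,min} builtins, with signedness again taken from the element type (__v16si vs. __v16su, __v8di vs. __v8du); the file also gains the __v64qs typedef so the byte variants have an explicitly signed element type. A small sketch, assuming clang 14 or newer (typedef names are illustrative):

  typedef int          v4si __attribute__((__vector_size__(16)));
  typedef unsigned int v4su __attribute__((__vector_size__(16)));

  /* Fold a vector down to one scalar of its element type. */
  static int and_all(v4si v)  { return __builtin_reduce_and(v); }
  static int smax_all(v4si v) { return __builtin_reduce_max(v); } /* signed   */
  static unsigned umax_all(v4su v) {
    return __builtin_reduce_max(v);                               /* unsigned */
  }
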
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512fp16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512fp16intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512fp16intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512ifmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512ifmaintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512ifmaintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512ifmavlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512ifmavlintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512ifmavlintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512pfintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512pfintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512pfintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512pfintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmi2intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vbmi2intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vbmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmiintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vbmiintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vbmivlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vbmivlintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vbmivlintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlbf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlbf16intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlbf16intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlbitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlbitalgintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlbitalgintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlbwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlbwintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlbwintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlcdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlcdintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlcdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vldqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vldqintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vldqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlfp16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlfp16intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlfp16intrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
index 0519dba..178c9db 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/avx512vlintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/avx512vlintrin.h
@@ -2988,7 +2988,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_abs_epi64 (__m128i __A) {
-  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3007,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_abs_epi64 (__m256i __A) {
-  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3054,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3073,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3120,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_max_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3139,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_max_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3186,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epi64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3205,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epi64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3252,7 @@
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_min_epu64 (__m128i __A, __m128i __B) {
-  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3271,7 @@
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_min_epu64 (__m256i __A, __m256i __B) {
-  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlvbmi2intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlvbmi2intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlvnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlvnniintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlvnniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vlvp2intersectintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vlvp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vnniintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vnniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vp2intersectintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vpopcntdqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h b/linux-x86/lib64/clang/14.0.6/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avx512vpopcntdqvlintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avx512vpopcntdqvlintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avxintrin.h b/linux-x86/lib64/clang/14.0.6/include/avxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avxintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/avxvnniintrin.h b/linux-x86/lib64/clang/14.0.6/include/avxvnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/avxvnniintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/avxvnniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/bits/stdatomic.h b/linux-x86/lib64/clang/14.0.6/include/bits/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/bits/stdatomic.h
rename to linux-x86/lib64/clang/14.0.6/include/bits/stdatomic.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/bmi2intrin.h b/linux-x86/lib64/clang/14.0.6/include/bmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/bmi2intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/bmi2intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/bmiintrin.h b/linux-x86/lib64/clang/14.0.6/include/bmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/bmiintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/bmiintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/builtins.h b/linux-x86/lib64/clang/14.0.6/include/builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/builtins.h
rename to linux-x86/lib64/clang/14.0.6/include/builtins.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/cet.h b/linux-x86/lib64/clang/14.0.6/include/cet.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cet.h
rename to linux-x86/lib64/clang/14.0.6/include/cet.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h b/linux-x86/lib64/clang/14.0.6/include/cetintrin.h
similarity index 91%
copy from darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/cetintrin.h
index 4290e9d..019cab0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cetintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/cetintrin.h
@@ -42,10 +42,20 @@
   return __builtin_ia32_rdsspd(__a);
 }
 
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32() {
+  unsigned int t;
+  return __builtin_ia32_rdsspd(t);
+}
+
 #ifdef __x86_64__
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
   return __builtin_ia32_rdsspq(__a);
 }
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64() {
+  unsigned long long t;
+  return __builtin_ia32_rdsspq(t);
+}
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
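The new _rdsspd_i32/_rdsspq_i64 wrappers above read the CET shadow-stack pointer without making the caller supply a dummy operand; RDSSP overwrites its destination register, so passing the uninitialized local t is deliberate upstream. The result is only meaningful when shadow stacks are active: on parts without CET, RDSSP executes as a NOP and the value is indeterminate. A hypothetical usage sketch (assumes an x86-64 target built with -mshstk so <immintrin.h> exposes these intrinsics):

  #include <immintrin.h>

  /* Read the current shadow-stack pointer via the new no-argument
   * wrapper; meaningful only when CET shadow stacks are enabled. */
  unsigned long long current_ssp(void) {
    return _rdsspq_i64();
  }
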
diff --git a/linux-x86/lib64/clang/14.0.2/include/cldemoteintrin.h b/linux-x86/lib64/clang/14.0.6/include/cldemoteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cldemoteintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/cldemoteintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clflushoptintrin.h b/linux-x86/lib64/clang/14.0.6/include/clflushoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clflushoptintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/clflushoptintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clwbintrin.h b/linux-x86/lib64/clang/14.0.6/include/clwbintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clwbintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/clwbintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/clzerointrin.h b/linux-x86/lib64/clang/14.0.6/include/clzerointrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/clzerointrin.h
rename to linux-x86/lib64/clang/14.0.6/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h b/linux-x86/lib64/clang/14.0.6/include/cpuid.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/cpuid.h
copy to linux-x86/lib64/clang/14.0.6/include/cpuid.h
index 6df1b4a..5d262a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/cpuid.h
+++ b/linux-x86/lib64/clang/14.0.6/include/cpuid.h
@@ -200,7 +200,7 @@
 #define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI       0x00000008
+#define bit_AVXVNNI       0x00000010
 #define bit_AVX512BF16    0x00000020
 #define bit_HRESET        0x00400000
 
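The cpuid.h change above is a correctness fix: AVX-VNNI is reported in bit 4 (0x00000010) of EAX for leaf 7 sub-leaf 1, so the old 0x00000008 value tested the wrong bit. A small sketch of probing the corrected bit (assumes an x86 target; __get_cpuid_count is the <cpuid.h> helper for sub-leaf queries):

  #include <cpuid.h>
  #include <stdio.h>

  int main(void) {
    unsigned eax, ebx, ecx, edx;
    /* Leaf 7, sub-leaf 1: extended feature flags. */
    if (__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
      printf("AVX-VNNI: %s\n", (eax & bit_AVXVNNI) ? "yes" : "no");
    return 0;
  }
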
diff --git a/linux-x86/lib64/clang/14.0.2/include/crc32intrin.h b/linux-x86/lib64/clang/14.0.6/include/crc32intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/crc32intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/crc32intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm b/linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/algorithm
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/algorithm
rename to linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/algorithm
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex b/linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/complex
rename to linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/complex
diff --git a/linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/new b/linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/cuda_wrappers/new
rename to linux-x86/lib64/clang/14.0.6/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h b/linux-x86/lib64/clang/14.0.6/include/emmintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/emmintrin.h
index 6e9c303..4618b80 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/emmintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/emmintrin.h
@@ -2375,7 +2375,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2395,7 +2395,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu8(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2415,7 +2415,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi16(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
 }
 
 /// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2435,7 +2435,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu8(__m128i __a, __m128i __b)
 {
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
 }
 
 /// Multiplies the corresponding elements of two signed [8 x i16]
diff --git a/linux-x86/lib64/clang/14.0.2/include/enqcmdintrin.h b/linux-x86/lib64/clang/14.0.6/include/enqcmdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/enqcmdintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/enqcmdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/f16cintrin.h b/linux-x86/lib64/clang/14.0.6/include/f16cintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/f16cintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/f16cintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/float.h b/linux-x86/lib64/clang/14.0.6/include/float.h
similarity index 83%
copy from darwin-x86/lib64/clang/14.0.2/include/float.h
copy to linux-x86/lib64/clang/14.0.6/include/float.h
index ed610b2..c6a6cc0 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/float.h
+++ b/linux-x86/lib64/clang/14.0.6/include/float.h
@@ -14,10 +14,11 @@
  * additional definitions provided for Windows.
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  *
- * Also fall back on Darwin to allow additional definitions and
+ * Also fall back on Darwin and AIX to allow additional definitions and
  * implementation-defined values.
  */
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) ||        \
+     defined(_AIX)) &&                                                         \
     __STDC_HOSTED__ && __has_include_next(<float.h>)
 
 /* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@@ -37,7 +38,9 @@
 #  undef FLT_MANT_DIG
 #  undef DBL_MANT_DIG
 #  undef LDBL_MANT_DIG
-#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#  if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) ||              \
+      __cplusplus >= 201103L ||                                                \
+      (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef DECIMAL_DIG
 #  endif
 #  undef FLT_DIG
@@ -64,7 +67,9 @@
 #  undef FLT_MIN
 #  undef DBL_MIN
 #  undef LDBL_MIN
-#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) ||              \
+      __cplusplus >= 201703L ||                                                \
+      (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #    undef FLT_TRUE_MIN
 #    undef DBL_TRUE_MIN
 #    undef LDBL_TRUE_MIN
@@ -87,7 +92,9 @@
 #define DBL_MANT_DIG __DBL_MANT_DIG__
 #define LDBL_MANT_DIG __LDBL_MANT_DIG__
 
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) ||                \
+    __cplusplus >= 201103L ||                                                  \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define DECIMAL_DIG __DECIMAL_DIG__
 #endif
 
@@ -123,7 +130,9 @@
 #define DBL_MIN __DBL_MIN__
 #define LDBL_MIN __LDBL_MIN__
 
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) ||                \
+    __cplusplus >= 201703L ||                                                  \
+    (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
 #  define FLT_TRUE_MIN __FLT_DENORM_MIN__
 #  define DBL_TRUE_MIN __DBL_DENORM_MIN__
 #  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
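The float.h changes above extend the platform-header fallback and the visibility gates to AIX: on a hosted AIX build with _ALL_SOURCE defined, DECIMAL_DIG and the *_TRUE_MIN macros become visible even in strict-ANSI modes. The pre-existing gates are unchanged elsewhere: DECIMAL_DIG is a C99 addition, and FLT_TRUE_MIN/DBL_TRUE_MIN/LDBL_TRUE_MIN (the smallest positive subnormal values) are C11 additions. A short illustration:

  #include <float.h>
  #include <stdio.h>

  int main(void) {
    printf("FLT_MIN      = %a\n", FLT_MIN);      /* smallest normal    */
  #ifdef FLT_TRUE_MIN
    printf("FLT_TRUE_MIN = %a\n", FLT_TRUE_MIN); /* smallest subnormal */
  #endif
    return 0;
  }
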
diff --git a/linux-x86/lib64/clang/14.0.2/include/fma4intrin.h b/linux-x86/lib64/clang/14.0.6/include/fma4intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fma4intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/fma4intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fmaintrin.h b/linux-x86/lib64/clang/14.0.6/include/fmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fmaintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/fmaintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/14.0.6/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fuzzer/FuzzedDataProvider.h
rename to linux-x86/lib64/clang/14.0.6/include/fuzzer/FuzzedDataProvider.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/fxsrintrin.h b/linux-x86/lib64/clang/14.0.6/include/fxsrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/fxsrintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/fxsrintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/gfniintrin.h b/linux-x86/lib64/clang/14.0.6/include/gfniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/gfniintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/gfniintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h b/linux-x86/lib64/clang/14.0.6/include/hexagon_circ_brev_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/hexagon_circ_brev_intrinsics.h
rename to linux-x86/lib64/clang/14.0.6/include/hexagon_circ_brev_intrinsics.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h b/linux-x86/lib64/clang/14.0.6/include/hexagon_protos.h
similarity index 99%
copy from darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
copy to linux-x86/lib64/clang/14.0.6/include/hexagon_protos.h
index cdffd93..2642f3c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_protos.h
+++ b/linux-x86/lib64/clang/14.0.6/include/hexagon_protos.h
@@ -8003,17 +8003,6 @@
 #define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
-   Assembly Syntax:       Vd32=vmem(Rt32):nt
-   C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
-   Instruction Type:      MAPPING
-   Execution Slots:       SLOT0123
-   ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
 #if __HEXAGON_ARCH__ >= 65
 /* ==========================================================================
    Assembly Syntax:       Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h b/linux-x86/lib64/clang/14.0.6/include/hexagon_types.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
copy to linux-x86/lib64/clang/14.0.6/include/hexagon_types.h
index 6958809..029727c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hexagon_types.h
+++ b/linux-x86/lib64/clang/14.0.6/include/hexagon_types.h
@@ -1177,37 +1177,6 @@
 
 #endif /* __cplusplus */
 
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
-  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
-  // types are 16 bytes and 32 bytes for pairs.
-  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(128)));
-
-  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(256)));
-
-  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
-    __attribute__((aligned(4)));
-
-  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
-    __attribute__((aligned(4)));
-
-  #define Q6S_VectorPredPair HEXAGON_VecPred256
-  #define Q6S_VectorPred     HEXAGON_VecPred128
-  #define Q6S_Vector         HEXAGON_Vect1024
-  #define Q6S_VectorPair     HEXAGON_Vect2048
-  #define Q6S_UVector        HEXAGON_UVect1024
-  #define Q6S_UVectorPair    HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
 // V65 Vector types
 #if __HVX_ARCH__ >= 65
 #if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@
 #endif /* defined __HVX__ &&  (__HVX_LENGTH__ == 64) */
 #endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
 #endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
 
 /* Predicates */
 
diff --git a/linux-x86/lib64/clang/14.0.2/include/hresetintrin.h b/linux-x86/lib64/clang/14.0.6/include/hresetintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/hresetintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/hresetintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/htmintrin.h b/linux-x86/lib64/clang/14.0.6/include/htmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/htmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/htmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/htmxlintrin.h b/linux-x86/lib64/clang/14.0.6/include/htmxlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/htmxlintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h b/linux-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
similarity index 67%
copy from darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
copy to linux-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
index 41ce7a6..7e3679a 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/hvx_hexagon_protos.h
+++ b/linux-x86/lib64/clang/14.0.6/include/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
 //===----------------------------------------------------------------------===//
 
 
-
 #ifndef _HVX_HEXAGON_PROTOS_H_
 #define _HVX_HEXAGON_PROTOS_H_ 1
 
@@ -28,7 +27,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -39,7 +38,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
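Two patterns recur through the hvx_hexagon_protos.h hunks in this file. First, every wrapper macro becomes function-like with explicit parameters, so it expands only at an actual call. Second, operations on HVX predicate values now convert at the boundary: predicate inputs become plain vectors via V6_vandvrt(q, -1), the underlying builtin runs on vectors, and V6_vandqrt(v, -1) converts the result back. A plain-C sketch of the shape (all names below are invented stand-ins, not the header's types or builtins):

  typedef int Pred;  /* stands in for HVX_VectorPred */
  typedef int Vec;   /* stands in for HVX_Vector     */

  static Vec  from_pred(Pred q) { return (Vec)q; }  /* ~ V6_vandvrt(q,-1) */
  static Pred to_pred(Vec v)    { return (Pred)v; } /* ~ V6_vandqrt(v,-1) */
  static Vec  vec_and(Vec a, Vec b) { return a & b; }

  /* Function-like macro: convert in, operate on vectors, convert out. */
  #define Q_AND(Qs, Qt) to_pred(vec_and(from_pred(Qs), from_pred(Qt)))
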
@@ -83,7 +82,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -193,7 +192,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -952,7 +951,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1194,7 +1193,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
    C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
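
Q6_V_vmux_QVV is the first macro in this stretch whose expansion does more than forward arguments: the predicate operand Qt is first passed through __builtin_HEXAGON_V6_vandvrt((Qt),-1). The same wrapping recurs below for vswap and the vsubbnq/vsubbq-style conditional ops, which suggests that user-visible Q values are carried in ordinary HVX vectors, and vandvrt with an all-ones mask converts them to the builtins' native predicate type. A hedged usage sketch under the same toolchain assumptions as above; Q6_Q_vcmp_gt_VubVub comes from the same header family but does not appear in this hunk:

/* Per-byte select: result lane = (a > b) ? a : b. */
HVX_Vector max_like(HVX_Vector a, HVX_Vector b)
{
    HVX_VectorPred q = Q6_Q_vcmp_gt_VubVub(a, b); /* Qd = vcmp.gt(Vu.ub,Vv.ub) */
    return Q6_V_vmux_QVV(q, a, b);                /* Vd = vmux(Qt,Vu,Vv)       */
}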
 
 #if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.w+=vrmpy(Vu32.b,Vv32.b)
    C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
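
The ...RbI/...RubI reduction forms carry a trailing Iu1 parameter. Going by the Iu1 naming it is a 1-bit unsigned immediate, and the underlying builtins are expected to receive a compile-time constant there, so a hedged usage sketch passes a literal (toolchain assumptions as above):

/* Reducing multiply of a ub vector pair against packed ub coefficients,
   with the immediate selecting the variant (0 here). */
HVX_VectorPair rmpy0(HVX_VectorPair src, int coeffs)
{
    return Q6_Wuw_vrmpy_WubRubI(src, coeffs, 0); /* Vdd.uw = vrmpy(Vuu.ub,Rt.ub,#0) */
}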
 
 #if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
 /* ==========================================================================
    Assembly Syntax:       Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
    C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
-   Instruction Type:      CVI_VX_DV
+   Instruction Type:      CVI_VX
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 60 */
 
 #if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
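
The splat macros gated on HVX v62 take only a scalar register operand. A hedged usage sketch (same toolchain assumptions as above):

/* Broadcast the low byte of value into every byte lane of a vector. */
HVX_Vector fill(int value)
{
    return Q6_Vb_vsplat_R(value); /* Vd.b = vsplat(Rt) */
}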
 
 #if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
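
Q6_Q_vsetq2_R and the two vshuffe macros just below show the output half of the predicate plumbing: a builtin whose result is a genuine predicate is wrapped in __builtin_HEXAGON_V6_vandqrt((...),-1) before being handed back to user code, the mirror image of the vandvrt conversion applied to Q inputs. A plain-C analogy of the round trip, with stand-in types; this illustrates the representation change only, not the real HVX types or builtins:

typedef unsigned int  wide_q;   /* stand-in: Q carried in a full vector   */
typedef unsigned char narrow_q; /* stand-in: builtin-level predicate type */

static narrow_q q_in(wide_q w)    { return (narrow_q)w; } /* ~vandvrt(Q,-1) */
static wide_q   q_out(narrow_q n) { return (wide_q)n;   } /* ~vandqrt(Q,-1) */
static narrow_q builtin_shuffe(narrow_q s, narrow_q t)
{
    return (narrow_q)((s & 0xAAu) | (t & 0x55u)); /* stand-in computation */
}

/* Mirrors Q6_Qb_vshuffe_QhQh: convert both inputs in, convert the result out. */
#define QB_VSHUFFE(Qs, Qt) q_out(builtin_shuffe(q_in(Qs), q_in(Qt)))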
 
 #if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 62 */
 
 #if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
 #endif /* __HEXAGON_ARCH___ >= 65 */
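/* Even the nullary wrapper is now function-like, so the empty argument list
   is mandatory at the call site. A one-line sketch:

   HVX_VectorPair acc = Q6_W_vzero();   // both halves cleared
*/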
 
 #if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
    Execution Slots:       SLOT01
    ========================================================================== */
 
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
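/* The gather family stores through Rs rather than returning a value, and the
   masked (AQ...) forms lower the Qs predicate with V6_vandvrt before the
   builtin call, as above. A hedged sketch, assuming dst points into VTCM and
   base/size/offs describe the source table (names are illustrative, not from
   this header):

   static inline void gather_h(HVX_Vector *dst, int base, int size,
                               HVX_Vector offs) {
     Q6_vgather_ARMVh(dst, base, size, offs);   // roughly: dst[i] = mem[base + offs[i]]
   }
*/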
 
 #if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
    Execution Slots:       SLOT2
    ========================================================================== */
 
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
 
 #if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
    Execution Slots:       SLOT0
    ========================================================================== */
 
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
 #endif /* __HEXAGON_ARCH___ >= 65 */
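/* Scatters mirror the gathers: the plain forms store Vw at the offsets in
   Vv, the ...acc (_add) forms add into memory instead of overwriting, and
   the Q-masked forms again lower the predicate with V6_vandvrt. A hedged
   sketch with illustrative names:

   Q6_vscatter_RMVhV(base, size, offs, data);      // roughly: mem[base+offs[i]]  = data[i]
   Q6_vscatteracc_RMVhV(base, size, offs, data);   // roughly: mem[base+offs[i]] += data[i]
*/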
 
 #if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
    Execution Slots:       SLOT0123
    ========================================================================== */
 
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
 #endif /* __HEXAGON_ARCH___ >= 66 */
 
 #if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
 #if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
    Execution Slots:       SLOT23
    ========================================================================== */
 
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
 #endif /* __HEXAGON_ARCH___ >= 68 */
 
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vabs(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
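/* From here the hunk adds the HVX v68 floating-point set: IEEE half (hf) and
   single (sf) precision plus the accumulating qfloat formats (qf16, qf32).
   The wrappers follow the same function-like pattern; a minimal sketch,
   assuming v holds hf lanes:

   HVX_Vector mag = Q6_Vhf_vabs_Vhf(v);   // per-lane |v|
*/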
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vabs(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vfmv(Vu32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vu32.qf16
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=Vuu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=Vu32.qf32
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
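/* A typical qf32 pattern suggested by the prototypes above: do the adds in
   the wider qfloat form, then normalize back to IEEE sf once at the end. A
   minimal sketch, assuming a, b, c hold sf lanes:

   HVX_Vector sum3_sf(HVX_Vector a, HVX_Vector b, HVX_Vector c) {
     HVX_Vector acc = Q6_Vqf32_vadd_VsfVsf(a, b);   // sf + sf   -> qf32
     acc = Q6_Vqf32_vadd_Vqf32Vsf(acc, c);          // qf32 + sf -> qf32
     return Q6_Vsf_equals_Vqf32(acc);               // qf32      -> sf
   }
*/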
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.hf=vcvt(Vu32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vcvt(Vu32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vcvt(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
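/* The vcvt group converts between integer and float lane types: widening
   conversions (b/ub -> hf, hf -> sf) return an HVX_VectorPair, and the
   byte-narrowing ones pack two hf inputs. A minimal round-trip sketch,
   assuming u16s holds uh lanes (large values may round, since hf is
   narrower):

   HVX_Vector as_hf = Q6_Vhf_vcvt_Vuh(u16s);    // uh -> hf
   HVX_Vector back  = Q6_Vuh_vcvt_Vhf(as_hf);   // hf -> uh
*/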
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
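/* Per the prototypes above, vdmpy multiplies paired hf lanes and sums them
   into sf words; the _acc form folds into a running total. A minimal sketch,
   assuming acc, u, v are HVX_Vector:

   acc = Q6_Vsf_vdmpyacc_VsfVhfVhf(acc, u, v);   // acc += hf pair products
*/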
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vfneg(Vu32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vfneg(Vu32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
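/* The float compares produce an HVX_VectorPred via the usual vandqrt
   wrapping, so they compose with the existing predicate machinery. A minimal
   sketch, assuming the pre-existing Q6_V_vmux_QVV mux wrapper is in scope:

   HVX_Vector max_hf(HVX_Vector a, HVX_Vector b) {
     HVX_VectorPred gt = Q6_Q_vcmp_gt_VhfVhf(a, b);
     return Q6_V_vmux_QVV(gt, a, b);   // lane-wise max(a, b)
   }
*/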
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
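+
+/* Usage sketch (illustrative; va, vb, vc are hypothetical HVX_Vector
+   values defined elsewhere): building and then refining a predicate with
+   the single-precision compare intrinsics above.
+
+     HVX_VectorPred q = Q6_Q_vcmp_gt_VsfVsf(va, vb); // q  =  (va > vb)
+     q = Q6_Q_vcmp_gtand_QVsfVsf(q, va, vc);         // q &= (va > vc)
+*/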
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
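+
+/* Usage sketch (illustrative; vx, vlo, vhi are hypothetical HVX_Vector
+   values): clamping single-precision lanes to [vlo, vhi] with the min/max
+   intrinsics above.
+
+     HVX_Vector vclamped = Q6_Vsf_vmin_VsfVsf(Q6_Vsf_vmax_VsfVsf(vx, vlo), vhi);
+*/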
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
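+
+/* Usage sketch (illustrative; vacc, va, vb are hypothetical HVX_Vector
+   values): a half-precision multiply-accumulate step using the intrinsic
+   above.
+
+     vacc = Q6_Vhf_vmpyacc_VhfVhfVhf(vacc, va, vb);  // vacc += va * vb
+*/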
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
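+
+/* Usage sketch (illustrative; wacc is a hypothetical HVX_VectorPair, va
+   and vb hypothetical HVX_Vector hf values): accumulating widened
+   half-precision products into single-precision pairs.
+
+     wacc = Q6_Wsf_vmpyacc_WsfVhfVhf(wacc, va, vb);  // wacc += va * vb
+*/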
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
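+
+/* Usage sketch (illustrative; va, vb are hypothetical HVX_Vector sf
+   values): multiplying in the wider qf32 format keeps extra range; the
+   result can be converted back to IEEE sf with the qf32-to-sf conversion
+   intrinsic provided elsewhere in this header.
+
+     HVX_Vector prod_qf32 = Q6_Vqf32_vmpy_VsfVsf(va, vb);
+*/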
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+   Assembly Syntax:       Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+   C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
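+
+/* Usage sketch (illustrative; vacc is a hypothetical qf32-formatted
+   HVX_Vector, vs an sf-formatted one): a mixed-format subtract using the
+   intrinsics above.
+
+     vacc = Q6_Vqf32_vsub_Vqf32Vsf(vacc, vs);
+*/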
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
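+
+/* Usage sketch (illustrative; wpair is a hypothetical HVX_VectorPair of
+   32-bit words, vshift a hypothetical HVX_Vector of per-lane shift
+   amounts): narrowing words to saturated unsigned halfwords with the V69
+   variable shifts above.
+
+     HVX_Vector vpacked = Q6_Vuh_vasr_WwVuh_sat(wpair, vshift);
+*/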
+
 #endif /* __HVX__ */
 
 #endif
diff --git a/linux-x86/lib64/clang/14.0.2/include/ia32intrin.h b/linux-x86/lib64/clang/14.0.6/include/ia32intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ia32intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ia32intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/immintrin.h b/linux-x86/lib64/clang/14.0.6/include/immintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/immintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/immintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/intrin.h b/linux-x86/lib64/clang/14.0.6/include/intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/inttypes.h b/linux-x86/lib64/clang/14.0.6/include/inttypes.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/inttypes.h
rename to linux-x86/lib64/clang/14.0.6/include/inttypes.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/invpcidintrin.h b/linux-x86/lib64/clang/14.0.6/include/invpcidintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/invpcidintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/invpcidintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/iso646.h b/linux-x86/lib64/clang/14.0.6/include/iso646.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/iso646.h
rename to linux-x86/lib64/clang/14.0.6/include/iso646.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/keylockerintrin.h b/linux-x86/lib64/clang/14.0.6/include/keylockerintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/keylockerintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/keylockerintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/limits.h b/linux-x86/lib64/clang/14.0.6/include/limits.h
similarity index 81%
copy from darwin-x86/lib64/clang/14.0.2/include/limits.h
copy to linux-x86/lib64/clang/14.0.6/include/limits.h
index c653580..c2d3a7c 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/limits.h
+++ b/linux-x86/lib64/clang/14.0.6/include/limits.h
@@ -62,6 +62,24 @@
 
 #define CHAR_BIT  __CHAR_BIT__
 
+/* C2x 5.2.4.2.1 */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define BOOL_WIDTH   __BOOL_WIDTH__
+#define CHAR_WIDTH   CHAR_BIT
+#define SCHAR_WIDTH  CHAR_BIT
+#define UCHAR_WIDTH  CHAR_BIT
+#define USHRT_WIDTH  __SHRT_WIDTH__
+#define SHRT_WIDTH   __SHRT_WIDTH__
+#define UINT_WIDTH   __INT_WIDTH__
+#define INT_WIDTH    __INT_WIDTH__
+#define ULONG_WIDTH  __LONG_WIDTH__
+#define LONG_WIDTH   __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH  __LLONG_WIDTH__
+#endif
+
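+/* Usage sketch (illustrative): the *_WIDTH macros expose type widths to
+   the preprocessor and to compile-time checks in C2x mode, e.g.:
+
+     _Static_assert(INT_WIDTH >= 16, "int is at least 16 bits wide");
+*/
+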
 #ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
 #define CHAR_MIN 0
 #define CHAR_MAX UCHAR_MAX
diff --git a/linux-x86/lib64/clang/14.0.2/include/lwpintrin.h b/linux-x86/lib64/clang/14.0.6/include/lwpintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/lwpintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/lwpintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/lzcntintrin.h b/linux-x86/lib64/clang/14.0.6/include/lzcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/lzcntintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/lzcntintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mm3dnow.h b/linux-x86/lib64/clang/14.0.6/include/mm3dnow.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mm3dnow.h
rename to linux-x86/lib64/clang/14.0.6/include/mm3dnow.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mm_malloc.h b/linux-x86/lib64/clang/14.0.6/include/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mm_malloc.h
rename to linux-x86/lib64/clang/14.0.6/include/mm_malloc.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mmintrin.h b/linux-x86/lib64/clang/14.0.6/include/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/mmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/module.modulemap b/linux-x86/lib64/clang/14.0.6/include/module.modulemap
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/module.modulemap
rename to linux-x86/lib64/clang/14.0.6/include/module.modulemap
diff --git a/linux-x86/lib64/clang/14.0.2/include/movdirintrin.h b/linux-x86/lib64/clang/14.0.6/include/movdirintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/movdirintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/movdirintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/msa.h b/linux-x86/lib64/clang/14.0.6/include/msa.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/msa.h
rename to linux-x86/lib64/clang/14.0.6/include/msa.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/mwaitxintrin.h b/linux-x86/lib64/clang/14.0.6/include/mwaitxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/mwaitxintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/mwaitxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/nmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/nmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/nmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/nmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/omp-tools.h b/linux-x86/lib64/clang/14.0.6/include/omp-tools.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/omp-tools.h
rename to linux-x86/lib64/clang/14.0.6/include/omp-tools.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/omp.h b/linux-x86/lib64/clang/14.0.6/include/omp.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/omp.h
rename to linux-x86/lib64/clang/14.0.6/include/omp.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h b/linux-x86/lib64/clang/14.0.6/include/opencl-c-base.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
copy to linux-x86/lib64/clang/14.0.6/include/opencl-c-base.h
index 9c81ddb..ad276dc 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c-base.h
+++ b/linux-x86/lib64/clang/14.0.6/include/opencl-c-base.h
@@ -68,9 +68,16 @@
 // For the SPIR and SPIR-V target all features are supported.
 #if defined(__SPIR__) || defined(__SPIRV__)
 #define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
 #endif // defined(__SPIR__)
 #endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
 
+#if !defined(__opencl_c_generic_address_space)
+// Internal feature macro to provide named (global, local, private) address
+// space overloads for builtin functions that take a pointer argument.
+#define __opencl_c_named_address_space_builtins 1
+#endif // !defined(__opencl_c_generic_address_space)
+
 // built-in scalar data types:
 
 /**
@@ -498,12 +505,14 @@
 
 #define MAX_WORK_DIM 3
 
+#ifdef __opencl_c_device_enqueue
 typedef struct {
   unsigned int workDimension;
   size_t globalWorkOffset[MAX_WORK_DIM];
   size_t globalWorkSize[MAX_WORK_DIM];
   size_t localWorkSize[MAX_WORK_DIM];
 } ndrange_t;
+#endif // __opencl_c_device_enqueue
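+
+/* Usage sketch (illustrative; global_size is a hypothetical size_t):
+   ndrange_t is only meaningful together with device-side enqueue, e.g.
+
+     ndrange_t r = ndrange_1D(global_size);
+*/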
 
 #endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
@@ -600,9 +609,11 @@
 // C++ for OpenCL - __remove_address_space
 #if defined(__OPENCL_CPP_VERSION__)
 template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
 template <typename _Tp> struct __remove_address_space<__generic _Tp> {
   using type = _Tp;
 };
+#endif
 template <typename _Tp> struct __remove_address_space<__global _Tp> {
   using type = _Tp;
 };
diff --git a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h b/linux-x86/lib64/clang/14.0.6/include/opencl-c.h
similarity index 96%
copy from darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
copy to linux-x86/lib64/clang/14.0.6/include/opencl-c.h
index 32af848..059a2ec 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/opencl-c.h
+++ b/linux-x86/lib64/clang/14.0.6/include/opencl-c.h
@@ -11,11 +11,11 @@
 
 #include "opencl-c-base.h"
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_images)
 #ifndef cl_khr_depth_images
 #define cl_khr_depth_images
 #endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_images)
 
 #if __OPENCL_C_VERSION__ < CL_VERSION_2_0
 #ifdef cl_khr_3d_image_writes
@@ -7285,7 +7285,9 @@
 half8 __ovld fract(half8 x, half8 *iptr);
 half16 __ovld fract(half16 x, half16 *iptr);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld fract(float x, __global float *iptr);
 float2 __ovld fract(float2 x, __global float2 *iptr);
 float3 __ovld fract(float3 x, __global float3 *iptr);
@@ -7344,7 +7346,7 @@
 half8 __ovld fract(half8 x, __private half8 *iptr);
 half16 __ovld fract(half16 x, __private half16 *iptr);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
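+
+/* Usage sketch (illustrative; x and ip are hypothetical): without the
+   generic address space, overload resolution follows the pointer's named
+   address space, using the declarations above.
+
+     __private float ip;
+     float f = fract(x, &ip);   // binds to the __private overload
+*/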
 
 /**
  * Extract mantissa and exponent from x. For each
@@ -7375,7 +7377,9 @@
 half8 __ovld frexp(half8 x, int8 *exp);
 half16 __ovld frexp(half16 x, int16 *exp);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld frexp(float x, __global int *exp);
 float2 __ovld frexp(float2 x, __global int2 *exp);
 float3 __ovld frexp(float3 x, __global int3 *exp);
@@ -7434,7 +7438,7 @@
 half8 __ovld frexp(half8 x, __private int8 *exp);
 half16 __ovld frexp(half16 x, __private int16 *exp);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute the value of the square root of x^2 + y^2
@@ -7582,7 +7586,9 @@
 half8 __ovld lgamma_r(half8 x, int8 *signp);
 half16 __ovld lgamma_r(half16 x, int16 *signp);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld lgamma_r(float x, __global int *signp);
 float2 __ovld lgamma_r(float2 x, __global int2 *signp);
 float3 __ovld lgamma_r(float3 x, __global int3 *signp);
@@ -7641,7 +7647,7 @@
 half8 __ovld lgamma_r(half8 x, __private int8 *signp);
 half16 __ovld lgamma_r(half16 x, __private int16 *signp);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute natural logarithm.
@@ -7888,7 +7894,9 @@
 half8 __ovld modf(half8 x, half8 *iptr);
 half16 __ovld modf(half16 x, half16 *iptr);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld modf(float x, __global float *iptr);
 float2 __ovld modf(float2 x, __global float2 *iptr);
 float3 __ovld modf(float3 x, __global float3 *iptr);
@@ -7947,7 +7955,7 @@
 half8 __ovld modf(half8 x, __private half8 *iptr);
 half16 __ovld modf(half16 x, __private half16 *iptr);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Returns a quiet NaN. The nancode may be placed
@@ -8147,9 +8155,10 @@
 half4 __ovld remquo(half4 x, half4 y, int4 *quo);
 half8 __ovld remquo(half8 x, half8 y, int8 *quo);
 half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld remquo(float x, float y, __global int *quo);
 float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
 float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
@@ -8208,7 +8217,7 @@
 half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
 half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 /**
  * Round to integral value (using round to nearest
  * even rounding mode) in floating-point format.
@@ -8372,7 +8381,9 @@
 half8 __ovld sincos(half8 x, half8 *cosval);
 half16 __ovld sincos(half16 x, half16 *cosval);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 float __ovld sincos(float x, __global float *cosval);
 float2 __ovld sincos(float2 x, __global float2 *cosval);
 float3 __ovld sincos(float3 x, __global float3 *cosval);
@@ -8431,7 +8442,7 @@
 half8 __ovld sincos(half8 x, __private half8 *cosval);
 half16 __ovld sincos(half16 x, __private half16 *cosval);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Compute hyperbolic sine.
@@ -11190,308 +11201,310 @@
  * 64-bit aligned if gentype is long, ulong, double.
  */
 
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
+char2 __ovld __purefn vload2(size_t offset, const __constant char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __constant uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __constant short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __constant ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __constant int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __constant uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __constant long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __constant ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __constant float *p);
+char3 __ovld __purefn vload3(size_t offset, const __constant char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __constant uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __constant short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __constant ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __constant int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __constant uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __constant long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __constant ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __constant float *p);
+char4 __ovld __purefn vload4(size_t offset, const __constant char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __constant uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __constant short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __constant ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __constant int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __constant uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __constant long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __constant ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __constant float *p);
+char8 __ovld __purefn vload8(size_t offset, const __constant char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __constant uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __constant short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __constant ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __constant int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __constant uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __constant long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __constant ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __constant float *p);
+char16 __ovld __purefn vload16(size_t offset, const __constant char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __constant uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __constant short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __constant ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __constant int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __constant uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __constant long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __constant ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __constant float *p);
 #ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
+double2 __ovld __purefn vload2(size_t offset, const __constant double *p);
+double3 __ovld __purefn vload3(size_t offset, const __constant double *p);
+double4 __ovld __purefn vload4(size_t offset, const __constant double *p);
+double8 __ovld __purefn vload8(size_t offset, const __constant double *p);
+double16 __ovld __purefn vload16(size_t offset, const __constant double *p);
 #endif //cl_khr_fp64
 
 #ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
+half __ovld __purefn vload(size_t offset, const __constant half *p);
+half2 __ovld __purefn vload2(size_t offset, const __constant half *p);
+half3 __ovld __purefn vload3(size_t offset, const __constant half *p);
+half4 __ovld __purefn vload4(size_t offset, const __constant half *p);
+half8 __ovld __purefn vload8(size_t offset, const __constant half *p);
+half16 __ovld __purefn vload16(size_t offset, const __constant half *p);
 #endif //cl_khr_fp16
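+
+/* Note: __purefn expands to __attribute__((pure)), so identical vload
+   calls may be merged when memory is provably unchanged between them, e.g.
+
+     float4 a = vload4(0, p);
+     float4 b = vload4(0, p);   // candidate for CSE with 'a'
+*/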
 
 #if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
+char2 __ovld __purefn vload2(size_t offset, const char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const int *p);
+uint2 __ovld __purefn vload2(size_t offset, const uint *p);
+long2 __ovld __purefn vload2(size_t offset, const long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const float *p);
+char3 __ovld __purefn vload3(size_t offset, const char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const int *p);
+uint3 __ovld __purefn vload3(size_t offset, const uint *p);
+long3 __ovld __purefn vload3(size_t offset, const long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const float *p);
+char4 __ovld __purefn vload4(size_t offset, const char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const int *p);
+uint4 __ovld __purefn vload4(size_t offset, const uint *p);
+long4 __ovld __purefn vload4(size_t offset, const long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const float *p);
+char8 __ovld __purefn vload8(size_t offset, const char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const int *p);
+uint8 __ovld __purefn vload8(size_t offset, const uint *p);
+long8 __ovld __purefn vload8(size_t offset, const long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const float *p);
+char16 __ovld __purefn vload16(size_t offset, const char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const int *p);
+uint16 __ovld __purefn vload16(size_t offset, const uint *p);
+long16 __ovld __purefn vload16(size_t offset, const long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const float *p);
 
 #ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
+double2 __ovld __purefn vload2(size_t offset, const double *p);
+double3 __ovld __purefn vload3(size_t offset, const double *p);
+double4 __ovld __purefn vload4(size_t offset, const double *p);
+double8 __ovld __purefn vload8(size_t offset, const double *p);
+double16 __ovld __purefn vload16(size_t offset, const double *p);
 #endif //cl_khr_fp64
 
 #ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
+half __ovld __purefn vload(size_t offset, const half *p);
+half2 __ovld __purefn vload2(size_t offset, const half *p);
+half3 __ovld __purefn vload3(size_t offset, const half *p);
+half4 __ovld __purefn vload4(size_t offset, const half *p);
+half8 __ovld __purefn vload8(size_t offset, const half *p);
+half16 __ovld __purefn vload16(size_t offset, const half *p);
 #endif //cl_khr_fp16
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+char2 __ovld __purefn vload2(size_t offset, const __global char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __global uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __global short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __global ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __global int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __global uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __global long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __global ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __global float *p);
+char3 __ovld __purefn vload3(size_t offset, const __global char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __global uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __global short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __global ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __global int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __global uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __global long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __global ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __global float *p);
+char4 __ovld __purefn vload4(size_t offset, const __global char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __global uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __global short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __global ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __global int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __global uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __global long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __global ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __global float *p);
+char8 __ovld __purefn vload8(size_t offset, const __global char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __global uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __global short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __global ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __global int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __global uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __global long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __global ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __global float *p);
+char16 __ovld __purefn vload16(size_t offset, const __global char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __global uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __global short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __global ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __global int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __global uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __global long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __global ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __global float *p);
+char2 __ovld __purefn vload2(size_t offset, const __local char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __local uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __local short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __local ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __local int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __local uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __local long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __local ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __local float *p);
+char3 __ovld __purefn vload3(size_t offset, const __local char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __local uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __local short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __local ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __local int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __local uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __local long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __local ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __local float *p);
+char4 __ovld __purefn vload4(size_t offset, const __local char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __local uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __local short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __local ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __local int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __local uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __local long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __local ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __local float *p);
+char8 __ovld __purefn vload8(size_t offset, const __local char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __local uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __local short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __local ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __local int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __local uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __local long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __local ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __local float *p);
+char16 __ovld __purefn vload16(size_t offset, const __local char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __local uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __local short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __local ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __local int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __local uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __local long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __local ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __local float *p);
+char2 __ovld __purefn vload2(size_t offset, const __private char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __private uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __private short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __private ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __private int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __private uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __private long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __private ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __private float *p);
+char3 __ovld __purefn vload3(size_t offset, const __private char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __private uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __private short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __private ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __private int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __private uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __private long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __private ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __private float *p);
+char4 __ovld __purefn vload4(size_t offset, const __private char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __private uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __private short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __private ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __private int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __private uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __private long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __private ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __private float *p);
+char8 __ovld __purefn vload8(size_t offset, const __private char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __private uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __private short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __private ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __private int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __private uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __private long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __private ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __private float *p);
+char16 __ovld __purefn vload16(size_t offset, const __private char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __private uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __private short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __private ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __private int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __private uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __private long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __private ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __private float *p);
+
+#ifdef cl_khr_fp64
+double2 __ovld __purefn vload2(size_t offset, const __global double *p);
+double3 __ovld __purefn vload3(size_t offset, const __global double *p);
+double4 __ovld __purefn vload4(size_t offset, const __global double *p);
+double8 __ovld __purefn vload8(size_t offset, const __global double *p);
+double16 __ovld __purefn vload16(size_t offset, const __global double *p);
+double2 __ovld __purefn vload2(size_t offset, const __local double *p);
+double3 __ovld __purefn vload3(size_t offset, const __local double *p);
+double4 __ovld __purefn vload4(size_t offset, const __local double *p);
+double8 __ovld __purefn vload8(size_t offset, const __local double *p);
+double16 __ovld __purefn vload16(size_t offset, const __local double *p);
+double2 __ovld __purefn vload2(size_t offset, const __private double *p);
+double3 __ovld __purefn vload3(size_t offset, const __private double *p);
+double4 __ovld __purefn vload4(size_t offset, const __private double *p);
+double8 __ovld __purefn vload8(size_t offset, const __private double *p);
+double16 __ovld __purefn vload16(size_t offset, const __private double *p);
+#endif //cl_khr_fp64
+
+#ifdef cl_khr_fp16
+half __ovld __purefn vload(size_t offset, const __global half *p);
+half2 __ovld __purefn vload2(size_t offset, const __global half *p);
+half3 __ovld __purefn vload3(size_t offset, const __global half *p);
+half4 __ovld __purefn vload4(size_t offset, const __global half *p);
+half8 __ovld __purefn vload8(size_t offset, const __global half *p);
+half16 __ovld __purefn vload16(size_t offset, const __global half *p);
+half __ovld __purefn vload(size_t offset, const __local half *p);
+half2 __ovld __purefn vload2(size_t offset, const __local half *p);
+half3 __ovld __purefn vload3(size_t offset, const __local half *p);
+half4 __ovld __purefn vload4(size_t offset, const __local half *p);
+half8 __ovld __purefn vload8(size_t offset, const __local half *p);
+half16 __ovld __purefn vload16(size_t offset, const __local half *p);
+half __ovld __purefn vload(size_t offset, const __private half *p);
+half2 __ovld __purefn vload2(size_t offset, const __private half *p);
+half3 __ovld __purefn vload3(size_t offset, const __private half *p);
+half4 __ovld __purefn vload4(size_t offset, const __private half *p);
+half8 __ovld __purefn vload8(size_t offset, const __private half *p);
+half16 __ovld __purefn vload16(size_t offset, const __private half *p);
+#endif //cl_khr_fp16
+#endif //defined(__opencl_c_named_address_space_builtins)
+
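// A minimal usage sketch for the vloadn overloads above (hypothetical kernel,
// not part of this header; assumes the matching __global vstore4 overload
// declared in the vstore block that follows):
__kernel void copy4(__global const float *in, __global float *out) {
    size_t i = get_global_id(0);
    float4 v = vload4(i, in); // reads 4 floats starting at (in + 4 * i)
    vstore4(v, i, out);       // writes 4 floats starting at (out + 4 * i)
}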
 #if defined(__opencl_c_generic_address_space)
 void __ovld vstore2(char2 data, size_t offset, char *p);
 void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
@@ -11553,7 +11566,9 @@
 void __ovld vstore8(half8 data, size_t offset, half *p);
 void __ovld vstore16(half16 data, size_t offset, half *p);
 #endif //cl_khr_fp16
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore2(char2 data, size_t offset, __global char *p);
 void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
 void __ovld vstore2(short2 data, size_t offset, __global short *p);
@@ -11726,7 +11741,7 @@
 void __ovld vstore8(half8 data, size_t offset, __private half *p);
 void __ovld vstore16(half16 data, size_t offset, __private half *p);
 #endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * Read sizeof (half) bytes of data from address
@@ -11736,15 +11751,17 @@
  * The read address computed as (p + offset)
  * must be 16-bit aligned.
  */
-float __ovld vload_half(size_t offset, const __constant half *p);
+float __ovld __purefn vload_half(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
+float __ovld __purefn vload_half(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld __purefn vload_half(size_t offset, const __global half *p);
+float __ovld __purefn vload_half(size_t offset, const __local half *p);
+float __ovld __purefn vload_half(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
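// Sketch of the vload_half behavior documented above: it reads sizeof(half)
// bytes at (p + offset) and returns the value converted to float. __purefn
// marks the overloads as side-effect free, so repeated loads of the same
// address may be combined by the compiler. (Hypothetical kernel, not part
// of this header.)
__kernel void to_float(__global const half *in, __global float *out) {
    size_t i = get_global_id(0);
    out[i] = vload_half(i, in); // half at (in + i), converted to float
}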
 /**
  * Read sizeof (halfn) bytes of data from address
  * (p + (offset * n)). The data read is interpreted
@@ -11753,35 +11770,37 @@
  * value is returned. The read address computed
  * as (p + (offset * n)) must be 16-bit aligned.
  */
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vload_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
 /**
  * The float value given by data is first
  * converted to a half value using the appropriate
@@ -11806,7 +11825,9 @@
 void __ovld vstore_half_rtp(double data, size_t offset, half *p);
 void __ovld vstore_half_rtn(double data, size_t offset, half *p);
 #endif //cl_khr_fp64
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore_half(float data, size_t offset, __global half *p);
 void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
 void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
@@ -11839,7 +11860,7 @@
 void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
 void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * The floatn value given by data is converted to
@@ -11905,7 +11926,9 @@
 void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
 void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
 #endif //cl_khr_fp64
-#else
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
 void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
 void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
@@ -12058,7 +12081,7 @@
 void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
 void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 /**
  * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@@ -12073,35 +12096,37 @@
  * The address computed as (p + (offset * 4))
  * must be aligned to sizeof (half) * 4 bytes.
  */
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __constant half *p);
 #if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const half *p);
 #endif //defined(__opencl_c_generic_address_space)
 
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vloada_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
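// The aligned variants differ from vload_halfn chiefly for 3-component data:
// per the comment above (and the OpenCL spec), vloada_half3 reads at
// (p + offset * 4) and requires sizeof(half) * 4 (8-byte) alignment, while
// vload_half3 reads at (p + offset * 3) with only 16-bit alignment.
// (Hypothetical kernel illustrating the two strides.)
__kernel void read3(__global const half *packed, __global const half *padded,
                    __global float4 *out) {
    size_t i = get_global_id(0);
    float3 a = vload_half3(i, packed);  // element i starts at packed + 3 * i
    float3 b = vloada_half3(i, padded); // element i starts at padded + 4 * i
    out[i] = (float4)(a + b, 0.0f);
}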
 /**
  * The floatn value given by data is converted to
  * a halfn value using the appropriate rounding
@@ -12180,8 +12205,9 @@
 void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
 void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
 #endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
 
-#else
+#if defined(__opencl_c_named_address_space_builtins)
 void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
 void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
 void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
@@ -12363,7 +12389,7 @@
 void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
 void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
 #endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
 
 // OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
 
@@ -15585,7 +15611,7 @@
 #endif //cl_khr_fp16
 
 // Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
 int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
 uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
@@ -15628,7 +15654,6 @@
 float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 #ifdef cl_khr_mipmap_image
 float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
 int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
@@ -15679,7 +15704,6 @@
 uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
 
 #endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
 // Image read functions returning half4 type
 #ifdef cl_khr_fp16
@@ -15690,7 +15714,7 @@
 half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
 half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
 #endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Write color value to location specified by coordinate
@@ -15834,7 +15858,7 @@
 #endif //cl_khr_fp16
 
 // Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
 void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
 void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
@@ -15866,7 +15890,6 @@
 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
 #endif //cl_khr_depth_images
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 #if defined(cl_khr_mipmap_image_writes)
 void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
 void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
@@ -15894,7 +15917,6 @@
 #endif //cl_khr_3d_image_writes
 
 #endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
 // Image write functions for half4 type
 #ifdef cl_khr_fp16
@@ -15907,7 +15929,7 @@
 void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
 void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
 #endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 // Note: In OpenCL v1.0/1.1/1.2, the image argument of image query builtin functions does not have
 // an access qualifier, and is therefore assumed to be read_only by default. Image query builtin
@@ -15955,7 +15977,7 @@
 int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_width(read_write image1d_t image);
 int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_width(read_write image2d_t image);
@@ -15972,7 +15994,7 @@
 int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image height in pixels.
@@ -16007,7 +16029,7 @@
 int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_height(read_write image2d_t image);
 int __ovld __cnfn get_image_height(read_write image3d_t image);
 int __ovld __cnfn get_image_height(read_write image2d_array_t image);
@@ -16021,7 +16043,7 @@
 int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image depth in pixels.
@@ -16032,9 +16054,9 @@
 int __ovld __cnfn get_image_depth(write_only image3d_t image);
 #endif
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 // OpenCL Extension v2.0 s9.18 - Mipmaps
 #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16053,9 +16075,11 @@
 int __ovld get_image_num_mip_levels(write_only image3d_t image);
 #endif
 
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_mip_levels(read_write image1d_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_t image);
 int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#endif //defined(__opencl_c_read_write_images)
 
 int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
 int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
@@ -16067,10 +16091,12 @@
 int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
 int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
 
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
 int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#endif //defined(__opencl_c_read_write_images)
 
 #endif //cl_khr_mipmap_image
 #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16130,7 +16156,7 @@
 int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
@@ -16147,7 +16173,7 @@
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image channel order. Valid values are:
@@ -16202,7 +16228,7 @@
 int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
 int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
 int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
@@ -16219,7 +16245,7 @@
 int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
 int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the 2D image width and height as an int2
@@ -16252,7 +16278,7 @@
 int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
 #ifdef cl_khr_depth_images
@@ -16265,7 +16291,7 @@
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
 int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the 3D image width, height, and depth as an
@@ -16277,9 +16303,9 @@
 #ifdef cl_khr_3d_image_writes
 int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
 #endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
  * Return the image array size.
@@ -16305,7 +16331,7 @@
 size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
 #endif //cl_khr_gl_msaa_sharing
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
 #ifdef cl_khr_depth_images
@@ -16315,7 +16341,7 @@
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
 size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
 #endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 
 /**
 * Return the number of samples associated with image
@@ -16331,12 +16357,12 @@
 int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 int __ovld get_image_num_samples(read_write image2d_msaa_t image);
 int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
 int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
 #endif
 
 // OpenCL v2.0 s6.13.15 - Work-group Functions
@@ -16450,6 +16476,7 @@
 // OpenCL v2.0 s6.13.17 - Enqueue Kernels
 #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
 
+#ifdef __opencl_c_device_enqueue
 ndrange_t __ovld ndrange_1D(size_t);
 ndrange_t __ovld ndrange_1D(size_t, size_t);
 ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
@@ -16477,6 +16504,7 @@
 void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
 
 queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
 #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
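// The device-side enqueue declarations above are now additionally gated on
// the OpenCL 3.0 feature macro __opencl_c_device_enqueue. A sketch of
// feature-testing it from kernel code (kernel body is illustrative; the
// actual enqueue_kernel call is elided):
#ifdef __opencl_c_device_enqueue
__kernel void launcher(void) {
    queue_t q = get_default_queue();  // default device-side queue
    ndrange_t nd = ndrange_1D(1024);  // 1-D range with global size 1024
    // ... enqueue child kernels against q and nd here ...
}
#endif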
 
 // OpenCL Extension v2.0 s9.17 - Sub-groups
@@ -17572,34 +17600,38 @@
 long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
 ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
 
+#if defined(__opencl_c_images)
 uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
 uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
 uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
 uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+#endif
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
 uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
 uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
 uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
 uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
 uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
 uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
 
+#if defined(__opencl_c_images)
 void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
 void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
 void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
 void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
 void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
 void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
 void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
 void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
@@ -17712,68 +17744,76 @@
 short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
 ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
 
+#if defined(__opencl_c_images)
 uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
 uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
 uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
 uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
 
+#if defined(__opencl_c_images)
 void       __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+#endif //defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
 void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
 void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
 void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
 
+#if defined(__opencl_c_images)
 ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
 ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
 ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
 ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
 ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
 ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
 ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
 ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
 ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
 ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
 
+#if defined(__opencl_c_images)
 void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
 void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
 void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
 void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+#endif // defined(__opencl_c_images)
 
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
 void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
 void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
 void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
 void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
 
 void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
 void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
@@ -17891,6 +17931,7 @@
     short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
     ushort2 image_size);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_ime_result_t __ovld
 intel_sub_group_avc_ime_evaluate_with_single_reference(
     read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -17931,6 +17972,7 @@
     read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
     intel_sub_group_avc_ime_payload_t payload,
     intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif
 
 intel_sub_group_avc_ime_single_reference_streamin_t __ovld
 intel_sub_group_avc_ime_get_single_reference_streamin(
@@ -17995,6 +18037,7 @@
 intel_sub_group_avc_ref_set_bilinear_filter_enable(
     intel_sub_group_avc_ref_payload_t payload);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_ref_result_t __ovld
 intel_sub_group_avc_ref_evaluate_with_single_reference(
     read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -18013,6 +18056,7 @@
     read_only image2d_t src_image, uint packed_reference_ids,
     uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
     intel_sub_group_avc_ref_payload_t payload);
+#endif //defined(__opencl_c_images)
 
 // SIC built-in functions
 intel_sub_group_avc_sic_payload_t __ovld
@@ -18063,6 +18107,7 @@
     uchar block_based_skip_type,
     intel_sub_group_avc_sic_payload_t payload);
 
+#if defined(__opencl_c_images)
 intel_sub_group_avc_sic_result_t __ovld
 intel_sub_group_avc_sic_evaluate_ipe(
     read_only image2d_t src_image, sampler_t vme_media_sampler,
@@ -18085,6 +18130,7 @@
     read_only image2d_t src_image, uint packed_reference_ids,
     uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
     intel_sub_group_avc_sic_payload_t payload);
+#endif //defined(__opencl_c_images)
 
 uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
     intel_sub_group_avc_sic_result_t result);
@@ -18493,6 +18539,8 @@
 // Disable any extensions we may have enabled previously.
 #pragma OPENCL EXTENSION all : disable
 
+#undef __opencl_c_named_address_space_builtins
+
 #undef __cnfn
 #undef __ovld
 #endif //_OPENCL_H_
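Across the opencl-c.h hunks above, guards of the form defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) are replaced with OpenCL 3.0 feature macros (__opencl_c_read_write_images, __opencl_c_images, __opencl_c_device_enqueue, __opencl_c_named_address_space_builtins), so the declarations track the features a target actually reports rather than the language version alone. A minimal sketch of kernel code feature-testing read-write images (the kernel itself is hypothetical):

#if defined(__opencl_c_read_write_images)
__kernel void invert(read_write image2d_t img) {
    int2 c = (int2)((int)get_global_id(0), (int)get_global_id(1));
    float4 px = read_imagef(img, c);           // read overload for read_write images
    write_imagef(img, c, (float4)(1.0f) - px); // write the inverted value back
}
#endif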
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/cmath
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/cmath
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/cmath
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex.h
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/complex_cmath.h
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/complex_cmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/math.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/math.h
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/math.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/new b/linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/openmp_wrappers/new
rename to linux-x86/lib64/clang/14.0.6/include/openmp_wrappers/new
diff --git a/linux-x86/lib64/clang/14.0.2/include/pconfigintrin.h b/linux-x86/lib64/clang/14.0.6/include/pconfigintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/pconfigintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/pconfigintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/pkuintrin.h b/linux-x86/lib64/clang/14.0.6/include/pkuintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/pkuintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/pkuintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/pmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/pmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/pmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/popcntintrin.h b/linux-x86/lib64/clang/14.0.6/include/popcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/popcntintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/popcntintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/emmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/emmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mm_malloc.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/mm_malloc.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/mmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/mmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/pmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/pmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/smmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/smmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/tmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/tmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ppc_wrappers/xmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ppc_wrappers/xmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/prfchwintrin.h b/linux-x86/lib64/clang/14.0.6/include/prfchwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/prfchwintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
copy to linux-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
index 008b8dd..62054a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/profile/InstrProfData.inc
+++ b/linux-x86/lib64/clang/14.0.6/include/profile/InstrProfData.inc
@@ -128,8 +128,10 @@
 INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
 INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
 INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
 INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
 INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
 INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
 INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
@@ -644,6 +646,7 @@
        (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
         (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
 
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 8
 /* Indexed profile format version (start from 1). */
@@ -653,15 +656,21 @@
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
  * generated profile, and 0 if this is a Clang FE generated profile.
  * 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
+ * The 60th bit indicates single byte coverage instrumentation.
+ * The 61st bit indicates function entry instrumentation only.
  */
 #define VARIANT_MASKS_ALL 0xff00000000000000ULL
 #define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
 #define VARIANT_MASK_IR_PROF (0x1ULL << 56)
 #define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
 #define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
+#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
+#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
 #define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
 #define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
 #define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
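The three new VARIANT_MASK_* bits extend the variant byte described in the comment above: bit 59 flags debug-info correlation, bit 60 single byte coverage, and bit 61 function-entry-only instrumentation. A minimal sketch of decoding a raw profile's Version word; the masks are restated locally here rather than pulled in through InstrProfData.inc's macro machinery, and the Version value is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Restated for the sketch; these normally come from InstrProfData.inc. */
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)

int main(void) {
  /* Hypothetical Version field as read from a raw profile header. */
  uint64_t v = 8 | VARIANT_MASK_IR_PROF | VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  printf("raw format version : %llu\n", (unsigned long long)GET_VERSION(v));
  printf("IR instrumentation : %d\n", (v & VARIANT_MASK_IR_PROF) != 0);
  printf("byte coverage      : %d\n", (v & VARIANT_MASK_BYTE_COVERAGE) != 0);
  printf("entry-only counters: %d\n", (v & VARIANT_MASK_FUNCTION_ENTRY_ONLY) != 0);
  return 0;
}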
diff --git a/linux-x86/lib64/clang/14.0.2/include/ptwriteintrin.h b/linux-x86/lib64/clang/14.0.6/include/ptwriteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/ptwriteintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/ptwriteintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/rdseedintrin.h b/linux-x86/lib64/clang/14.0.6/include/rdseedintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/rdseedintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/rdseedintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/rtmintrin.h b/linux-x86/lib64/clang/14.0.6/include/rtmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/rtmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/rtmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/s390intrin.h b/linux-x86/lib64/clang/14.0.6/include/s390intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/s390intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/s390intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/allocator_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/allocator_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/allocator_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/asan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/asan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/asan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/common_interface_defs.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/common_interface_defs.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/coverage_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/coverage_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
similarity index 79%
copy from darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
copy to linux-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
index d6209a3..8e581a6 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/sanitizer/dfsan_interface.h
+++ b/linux-x86/lib64/clang/14.0.6/include/sanitizer/dfsan_interface.h
@@ -27,6 +27,10 @@
 /// Signature of the callback argument to dfsan_set_write_callback().
 typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
 
+/// Signature of the callback argument to dfsan_set_conditional_callback().
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+                                             dfsan_origin origin);
+
 /// Computes the union of \c l1 and \c l2, resulting in a union label.
 dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
 
@@ -54,6 +58,10 @@
 /// Retrieves the label associated with the data at the given address.
 dfsan_label dfsan_read_label(const void *addr, size_t size);
 
+/// Returns the origin associated with the first tainted byte in the \c size
+/// bytes starting at the address \c addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+
 /// Returns whether the given label \c label contains the label \c elem.
 int dfsan_has_label(dfsan_label label, dfsan_label elem);
 
@@ -70,6 +78,19 @@
 /// callback executes.  Pass in NULL to remove any callback.
 void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
 
+/// Sets a callback to be invoked on any conditional expressions which have a
+/// taint label set. This can be used to find where tainted data influences
+/// the behavior of the program.
+/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+
+/// Conditional expressions occur during signal handlers.
+/// Making callbacks that handle signals well is tricky, so when
+/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
+/// handlers will add the labels they see into a global (bitwise-or'ed together).
+/// This function returns all label bits seen in signal handler conditions.
+dfsan_label dfsan_get_labels_in_signal_conditional();
+
 /// Interceptor hooks.
 /// Whenever one of dfsan's custom functions is called, the corresponding
 /// hook is called if it is non-zero. The hooks should be defined by the user.
@@ -87,6 +108,9 @@
 /// prints description at the beginning of the trace. If origin tracking is not
 /// on, or the address is not labeled, it prints nothing.
 void dfsan_print_origin_trace(const void *addr, const char *description);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+void dfsan_print_origin_id_trace(dfsan_origin origin);
 
 /// Prints the origin trace of the label at the address \p addr to a
 /// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -124,6 +148,10 @@
 /// return value is not less than \p out_buf_size.
 size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
                                  char *out_buf, size_t out_buf_size);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
+                                    size_t out_buf_size);
 
 /// Prints the stack trace leading to this call to a pre-allocated output
 /// buffer.
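For orientation, the conditional-callback interface added above is used roughly as follows. A minimal sketch, assuming the program is built with -fsanitize=dataflow and the (non-default) -mllvm -dfsan-conditional-callbacks=true option mentioned in the comments above; on_tainted_branch is a hypothetical name:

#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

/* Invoked whenever a branch condition carries a non-zero taint label. */
static void on_tainted_branch(dfsan_label label, dfsan_origin origin) {
  (void)origin;  /* origin is only meaningful with origin tracking enabled */
  fprintf(stderr, "tainted condition: label=%u\n", (unsigned)label);
}

int main(void) {
  int input = 7;
  dfsan_set_label(1, &input, sizeof(input));        /* taint `input` */
  dfsan_set_conditional_callback(on_tainted_branch);
  if (input > 0)  /* condition depends on tainted data, so the hook fires */
    puts("positive");
  return 0;
}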
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/hwasan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/hwasan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/linux_syscall_hooks.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/linux_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/lsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/lsan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/lsan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/msan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/msan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/msan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/netbsd_syscall_hooks.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/scudo_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/scudo_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/scudo_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/tsan_interface_atomic.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/tsan_interface_atomic.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h b/linux-x86/lib64/clang/14.0.6/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sanitizer/ubsan_interface.h
rename to linux-x86/lib64/clang/14.0.6/include/sanitizer/ubsan_interface.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/serializeintrin.h b/linux-x86/lib64/clang/14.0.6/include/serializeintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/serializeintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/serializeintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/sgxintrin.h b/linux-x86/lib64/clang/14.0.6/include/sgxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/sgxintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/sgxintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/shaintrin.h b/linux-x86/lib64/clang/14.0.6/include/shaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/shaintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/shaintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/smmintrin.h b/linux-x86/lib64/clang/14.0.6/include/smmintrin.h
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/include/smmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/smmintrin.h
index 710e55a..0df59c5 100644
--- a/linux-x86/lib64/clang/14.0.2/include/smmintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/smmintrin.h
@@ -668,7 +668,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi8 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+  return (__m128i) __builtin_elementwise_min((__v16qs) __V1, (__v16qs) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -687,7 +687,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi8 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+  return (__m128i) __builtin_elementwise_max((__v16qs) __V1, (__v16qs) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -706,7 +706,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu16 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+  return (__m128i) __builtin_elementwise_min((__v8hu) __V1, (__v8hu) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -725,7 +725,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu16 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+  return (__m128i) __builtin_elementwise_max((__v8hu) __V1, (__v8hu) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -744,7 +744,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epi32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_min((__v4si) __V1, (__v4si) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -763,7 +763,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epi32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_max((__v4si) __V1, (__v4si) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -782,7 +782,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_min_epu32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_min((__v4su) __V1, (__v4su) __V2);
 }
 
 /// Compares the corresponding elements of two 128-bit vectors of
@@ -801,7 +801,7 @@
 static __inline__  __m128i __DEFAULT_FN_ATTRS
 _mm_max_epu32 (__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+  return (__m128i) __builtin_elementwise_max((__v4su) __V1, (__v4su) __V2);
 }
 
 /* SSE4 Insertion and Extraction from XMM Register Instructions.  */
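The hunks above swap the target-specific __builtin_ia32_p{min,max}* builtins for the generic __builtin_elementwise_min/max; signedness is now carried by the vector element type (__v16qs, __v8hu, __v4su) rather than by the builtin's name. A small sketch, assuming an SSE4.1-enabled build (e.g. -msse4.1), of the signed/unsigned distinction those element types preserve:

#include <smmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi32(-1);  /* 0xFFFFFFFF in every 32-bit lane */
  __m128i b = _mm_set1_epi32(1);
  /* Identical bits, different element interpretation: */
  int s = _mm_cvtsi128_si32(_mm_min_epi32(a, b));                /* -1 */
  unsigned u = (unsigned)_mm_cvtsi128_si32(_mm_min_epu32(a, b)); /*  1 */
  printf("signed min: %d, unsigned min: %u\n", s, u);
  return 0;
}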
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdalign.h b/linux-x86/lib64/clang/14.0.6/include/stdalign.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdalign.h
rename to linux-x86/lib64/clang/14.0.6/include/stdalign.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdarg.h b/linux-x86/lib64/clang/14.0.6/include/stdarg.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdarg.h
rename to linux-x86/lib64/clang/14.0.6/include/stdarg.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdatomic.h b/linux-x86/lib64/clang/14.0.6/include/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdatomic.h
rename to linux-x86/lib64/clang/14.0.6/include/stdatomic.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdbool.h b/linux-x86/lib64/clang/14.0.6/include/stdbool.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdbool.h
rename to linux-x86/lib64/clang/14.0.6/include/stdbool.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/stddef.h b/linux-x86/lib64/clang/14.0.6/include/stddef.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stddef.h
rename to linux-x86/lib64/clang/14.0.6/include/stddef.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/stdint.h b/linux-x86/lib64/clang/14.0.6/include/stdint.h
similarity index 76%
copy from darwin-x86/lib64/clang/14.0.2/include/stdint.h
copy to linux-x86/lib64/clang/14.0.6/include/stdint.h
index 192f653..4790c25 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/stdint.h
+++ b/linux-x86/lib64/clang/14.0.6/include/stdint.h
@@ -461,6 +461,18 @@
 # define INT64_MAX           INT64_C( 9223372036854775807)
 # define INT64_MIN         (-INT64_C( 9223372036854775807)-1)
 # define UINT64_MAX         UINT64_C(18446744073709551615)
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT64_WIDTH         64
+# define INT64_WIDTH          UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __STDC_VERSION__ */
+
 # define __INT_LEAST64_MIN   INT64_MIN
 # define __INT_LEAST64_MAX   INT64_MAX
 # define __UINT_LEAST64_MAX UINT64_MAX
@@ -482,6 +494,15 @@
 # define INT_FAST64_MIN    __INT_LEAST64_MIN
 # define INT_FAST64_MAX    __INT_LEAST64_MAX
 # define UINT_FAST64_MAX  __UINT_LEAST64_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH  UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH  __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH   UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST64_MIN */
 
 
@@ -495,6 +516,7 @@
 # define INT_FAST56_MIN      INT56_MIN
 # define INT_FAST56_MAX      INT56_MAX
 # define UINT_FAST56_MAX    UINT56_MAX
+
 # define __INT_LEAST32_MIN   INT56_MIN
 # define __INT_LEAST32_MAX   INT56_MAX
 # define __UINT_LEAST32_MAX UINT56_MAX
@@ -504,6 +526,20 @@
 # define __INT_LEAST8_MIN    INT56_MIN
 # define __INT_LEAST8_MAX    INT56_MAX
 # define __UINT_LEAST8_MAX  UINT56_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT56_WIDTH         56
+# define INT56_WIDTH          UINT56_WIDTH
+# define UINT_LEAST56_WIDTH   UINT56_WIDTH
+# define INT_LEAST56_WIDTH    UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH    UINT56_WIDTH
+# define INT_FAST56_WIDTH     UINT_FAST56_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT56_TYPE__ */
 
 
@@ -517,6 +553,7 @@
 # define INT_FAST48_MIN      INT48_MIN
 # define INT_FAST48_MAX      INT48_MAX
 # define UINT_FAST48_MAX    UINT48_MAX
+
 # define __INT_LEAST32_MIN   INT48_MIN
 # define __INT_LEAST32_MAX   INT48_MAX
 # define __UINT_LEAST32_MAX UINT48_MAX
@@ -526,6 +563,20 @@
 # define __INT_LEAST8_MIN    INT48_MIN
 # define __INT_LEAST8_MAX    INT48_MAX
 # define __UINT_LEAST8_MAX  UINT48_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define UINT48_WIDTH         48
+#define INT48_WIDTH          UINT48_WIDTH
+#define UINT_LEAST48_WIDTH   UINT48_WIDTH
+#define INT_LEAST48_WIDTH    UINT_LEAST48_WIDTH
+#define UINT_FAST48_WIDTH    UINT48_WIDTH
+#define INT_FAST48_WIDTH     UINT_FAST48_WIDTH
+#define __UINT_LEAST32_WIDTH UINT48_WIDTH
+#define __UINT_LEAST16_WIDTH UINT48_WIDTH
+#define __UINT_LEAST8_WIDTH  UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT48_TYPE__ */
 
 
@@ -539,6 +590,7 @@
 # define INT_FAST40_MIN      INT40_MIN
 # define INT_FAST40_MAX      INT40_MAX
 # define UINT_FAST40_MAX    UINT40_MAX
+
 # define __INT_LEAST32_MIN   INT40_MIN
 # define __INT_LEAST32_MAX   INT40_MAX
 # define __UINT_LEAST32_MAX UINT40_MAX
@@ -548,6 +600,20 @@
 # define __INT_LEAST8_MIN    INT40_MIN
 # define __INT_LEAST8_MAX    INT40_MAX
 # define __UINT_LEAST8_MAX  UINT40_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT40_WIDTH         40
+# define INT40_WIDTH          UINT40_WIDTH
+# define UINT_LEAST40_WIDTH   UINT40_WIDTH
+# define INT_LEAST40_WIDTH    UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH    UINT40_WIDTH
+# define INT_FAST40_WIDTH     UINT_FAST40_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT40_TYPE__ */
 
 
@@ -555,6 +621,7 @@
 # define INT32_MAX           INT32_C(2147483647)
 # define INT32_MIN         (-INT32_C(2147483647)-1)
 # define UINT32_MAX         UINT32_C(4294967295)
+
 # define __INT_LEAST32_MIN   INT32_MIN
 # define __INT_LEAST32_MAX   INT32_MAX
 # define __UINT_LEAST32_MAX UINT32_MAX
@@ -564,6 +631,16 @@
 # define __INT_LEAST8_MIN    INT32_MIN
 # define __INT_LEAST8_MAX    INT32_MAX
 # define __UINT_LEAST8_MAX  UINT32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT32_WIDTH         32
+# define INT32_WIDTH          UINT32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT32_TYPE__ */
 
 #ifdef __INT_LEAST32_MIN
@@ -573,6 +650,15 @@
 # define INT_FAST32_MIN    __INT_LEAST32_MIN
 # define INT_FAST32_MAX    __INT_LEAST32_MAX
 # define UINT_FAST32_MAX  __UINT_LEAST32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH  UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH  __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH   UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST32_MIN */
 
 
@@ -586,12 +672,26 @@
 # define INT_FAST24_MIN      INT24_MIN
 # define INT_FAST24_MAX      INT24_MAX
 # define UINT_FAST24_MAX    UINT24_MAX
+
 # define __INT_LEAST16_MIN   INT24_MIN
 # define __INT_LEAST16_MAX   INT24_MAX
 # define __UINT_LEAST16_MAX UINT24_MAX
 # define __INT_LEAST8_MIN    INT24_MIN
 # define __INT_LEAST8_MAX    INT24_MAX
 # define __UINT_LEAST8_MAX  UINT24_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT24_WIDTH         24
+# define INT24_WIDTH          UINT24_WIDTH
+# define UINT_LEAST24_WIDTH   UINT24_WIDTH
+# define INT_LEAST24_WIDTH    UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH    UINT24_WIDTH
+# define INT_FAST24_WIDTH     UINT_FAST24_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT24_TYPE__ */
 
 
@@ -599,12 +699,22 @@
 #define INT16_MAX            INT16_C(32767)
 #define INT16_MIN          (-INT16_C(32767)-1)
 #define UINT16_MAX          UINT16_C(65535)
+
 # define __INT_LEAST16_MIN   INT16_MIN
 # define __INT_LEAST16_MAX   INT16_MAX
 # define __UINT_LEAST16_MAX UINT16_MAX
 # define __INT_LEAST8_MIN    INT16_MIN
 # define __INT_LEAST8_MAX    INT16_MAX
 # define __UINT_LEAST8_MAX  UINT16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT16_WIDTH         16
+# define INT16_WIDTH          UINT16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# define __UINT_LEAST8_WIDTH  UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT16_TYPE__ */
 
 #ifdef __INT_LEAST16_MIN
@@ -614,6 +724,15 @@
 # define INT_FAST16_MIN    __INT_LEAST16_MIN
 # define INT_FAST16_MAX    __INT_LEAST16_MAX
 # define UINT_FAST16_MAX  __UINT_LEAST16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH  UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH  __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH   UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST16_MIN */
 
 
@@ -621,9 +740,18 @@
 # define INT8_MAX            INT8_C(127)
 # define INT8_MIN          (-INT8_C(127)-1)
 # define UINT8_MAX          UINT8_C(255)
+
 # define __INT_LEAST8_MIN    INT8_MIN
 # define __INT_LEAST8_MAX    INT8_MAX
 # define __UINT_LEAST8_MAX  UINT8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT8_WIDTH         8
+# define INT8_WIDTH          UINT8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT8_TYPE__ */
 
 #ifdef __INT_LEAST8_MIN
@@ -633,6 +761,15 @@
 # define INT_FAST8_MIN    __INT_LEAST8_MIN
 # define INT_FAST8_MAX    __INT_LEAST8_MAX
 # define UINT_FAST8_MAX  __UINT_LEAST8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH  UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH  __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH   UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
 #endif /* __INT_LEAST8_MIN */
 
 /* Some utility macros */
@@ -652,6 +789,16 @@
 #define PTRDIFF_MAX   __PTRDIFF_MAX__
 #define    SIZE_MAX      __SIZE_MAX__
 
+/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTPTR_WIDTH  __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
 /* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
  * is enabled. */
 #if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +810,16 @@
 #define  INTMAX_MAX   __INTMAX_MAX__
 #define UINTMAX_MAX  __UINTMAX_MAX__
 
+/* C2x 7.20.2.5 Width of greatest-width integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+   exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
 /* C99 7.18.3 Limits of other integer types. */
 #define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
 #define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +846,16 @@
 #define  INTMAX_C(v) __int_c(v,  __INTMAX_C_SUFFIX__)
 #define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
 
+/* C2x 7.20.3.x Width of other integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+   in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define PTRDIFF_WIDTH    __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH       __SIZE_WIDTH__
+#define WCHAR_WIDTH      __WCHAR_WIDTH__
+#define WINT_WIDTH       __WINT_WIDTH__
+#endif
+
 #endif /* __STDC_HOSTED__ */
 #endif /* __CLANG_STDINT_H */
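All of the *_WIDTH additions above sit behind __STDC_VERSION__ >= 202000L, the placeholder value the FIXME comments refer to, so they only appear in C2x mode. A minimal sketch, assuming a clang invocation with -std=c2x:

#include <stdint.h>
#include <stdio.h>

int main(void) {
#ifdef INT32_WIDTH  /* defined only under the C2x guards above */
  printf("INT32_WIDTH=%d SIZE_WIDTH=%d INTPTR_WIDTH=%d\n",
         INT32_WIDTH, SIZE_WIDTH, INTPTR_WIDTH);
#else
  puts("*_WIDTH macros unavailable; recompile with -std=c2x");
#endif
  return 0;
}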
diff --git a/linux-x86/lib64/clang/14.0.2/include/stdnoreturn.h b/linux-x86/lib64/clang/14.0.6/include/stdnoreturn.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/stdnoreturn.h
rename to linux-x86/lib64/clang/14.0.6/include/stdnoreturn.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/tbmintrin.h b/linux-x86/lib64/clang/14.0.6/include/tbmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tbmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/tbmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/tgmath.h b/linux-x86/lib64/clang/14.0.6/include/tgmath.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tgmath.h
rename to linux-x86/lib64/clang/14.0.6/include/tgmath.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/tmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/tmmintrin.h
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/include/tmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/tmmintrin.h
index bcffa81..cb9be23 100644
--- a/linux-x86/lib64/clang/14.0.2/include/tmmintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/tmmintrin.h
@@ -53,7 +53,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi8(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
 }
 
 /// Computes the absolute value of each of the packed 16-bit signed
@@ -89,7 +89,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi16(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+    return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
 }
 
 /// Computes the absolute value of each of the packed 32-bit signed
@@ -125,7 +125,7 @@
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_abs_epi32(__m128i __a)
 {
-    return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+    return (__m128i)__builtin_elementwise_abs((__v4si)__a);
 }
 
 /// Concatenates the two 128-bit integer vector operands, and
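As in smmintrin.h above, the SSSE3 absolute-value intrinsics now go through the generic __builtin_elementwise_abs, with __v16qs making the byte elements explicitly signed (plain char signedness is target-dependent). A quick sketch, assuming an SSSE3-enabled build (e.g. -mssse3), of the wrap-around edge case the rewrite keeps intact:

#include <tmmintrin.h>
#include <stdio.h>

int main(void) {
  /* PABSB semantics: |INT8_MIN| wraps back to INT8_MIN (0x80). */
  __m128i v = _mm_set1_epi8(-128);
  __m128i r = _mm_abs_epi8(v);
  printf("%d\n", (int)(signed char)_mm_cvtsi128_si32(r)); /* prints -128 */
  return 0;
}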
diff --git a/linux-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h b/linux-x86/lib64/clang/14.0.6/include/tsxldtrkintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/tsxldtrkintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/tsxldtrkintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/uintrintrin.h b/linux-x86/lib64/clang/14.0.6/include/uintrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/uintrintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/uintrintrin.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/unwind.h b/linux-x86/lib64/clang/14.0.6/include/unwind.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/unwind.h
copy to linux-x86/lib64/clang/14.0.6/include/unwind.h
index 029524b..6e06979 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/unwind.h
+++ b/linux-x86/lib64/clang/14.0.6/include/unwind.h
@@ -172,7 +172,8 @@
   _UVRSC_CORE = 0,        /* integer register */
   _UVRSC_VFP = 1,         /* vfp */
   _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
-  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
+  _UVRSC_WMMXC = 4,       /* Intel WMMX control register */
+  _UVRSC_PSEUDO = 5       /* Special purpose pseudo register */
 } _Unwind_VRS_RegClass;
 
 typedef enum {
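_UVRSC_PSEUDO adds the special-purpose pseudo registers (introduced upstream for ARM return-address signing state) to the EHABI register classes. For context, a hedged sketch of how a register class is consumed, assuming the ARM EHABI interface (__ARM_EABI_UNWINDER__) where _Unwind_VRS_Get is declared:

#include <stdint.h>
#include <unwind.h>

/* Reads one core register out of an unwind context, e.g. from an
 * _Unwind_Backtrace callback; the _Unwind_VRS_RegClass argument selects
 * which register file (_UVRSC_CORE, _UVRSC_VFP, ..., _UVRSC_PSEUDO) is read. */
static uint32_t read_core_reg(struct _Unwind_Context *ctx, uint32_t regno) {
  uint32_t value = 0;
  _Unwind_VRS_Get(ctx, _UVRSC_CORE, regno, _UVRSD_UINT32, &value);
  return value;
}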
diff --git a/linux-x86/lib64/clang/14.0.2/include/vadefs.h b/linux-x86/lib64/clang/14.0.6/include/vadefs.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vadefs.h
rename to linux-x86/lib64/clang/14.0.6/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h b/linux-x86/lib64/clang/14.0.6/include/vaesintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
copy to linux-x86/lib64/clang/14.0.6/include/vaesintrin.h
index f3c0807..294dcff 100644
--- a/darwin-x86/lib64/clang/14.0.2/include/vaesintrin.h
+++ b/linux-x86/lib64/clang/14.0.6/include/vaesintrin.h
@@ -82,4 +82,4 @@
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_F
 
-#endif
+#endif // __VAESINTRIN_H
diff --git a/linux-x86/lib64/clang/14.0.2/include/varargs.h b/linux-x86/lib64/clang/14.0.6/include/varargs.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/varargs.h
rename to linux-x86/lib64/clang/14.0.6/include/varargs.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/vecintrin.h b/linux-x86/lib64/clang/14.0.6/include/vecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vecintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/vecintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h b/linux-x86/lib64/clang/14.0.6/include/vpclmulqdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/vpclmulqdqintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/vpclmulqdqintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/waitpkgintrin.h b/linux-x86/lib64/clang/14.0.6/include/waitpkgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/waitpkgintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/waitpkgintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/wasm_simd128.h b/linux-x86/lib64/clang/14.0.6/include/wasm_simd128.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/wasm_simd128.h
rename to linux-x86/lib64/clang/14.0.6/include/wasm_simd128.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h b/linux-x86/lib64/clang/14.0.6/include/wbnoinvdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/wbnoinvdintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/wbnoinvdintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/wmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/wmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/wmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/wmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/x86gprintrin.h b/linux-x86/lib64/clang/14.0.6/include/x86gprintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/x86gprintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/x86gprintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/x86intrin.h b/linux-x86/lib64/clang/14.0.6/include/x86intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/x86intrin.h
rename to linux-x86/lib64/clang/14.0.6/include/x86intrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xmmintrin.h b/linux-x86/lib64/clang/14.0.6/include/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xmmintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xmmintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xopintrin.h b/linux-x86/lib64/clang/14.0.6/include/xopintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xopintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xopintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsavecintrin.h b/linux-x86/lib64/clang/14.0.6/include/xsavecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsavecintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xsavecintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsaveintrin.h b/linux-x86/lib64/clang/14.0.6/include/xsaveintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsaveintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xsaveintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h b/linux-x86/lib64/clang/14.0.6/include/xsaveoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsaveoptintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xsaveoptintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xsavesintrin.h b/linux-x86/lib64/clang/14.0.6/include/xsavesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xsavesintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xsavesintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/include/xtestintrin.h b/linux-x86/lib64/clang/14.0.6/include/xtestintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/include/xtestintrin.h
rename to linux-x86/lib64/clang/14.0.6/include/xtestintrin.h
diff --git a/linux-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt b/linux-x86/lib64/clang/14.0.6/share/asan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/asan_ignorelist.txt
rename to linux-x86/lib64/clang/14.0.6/share/asan_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt b/linux-x86/lib64/clang/14.0.6/share/cfi_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/cfi_ignorelist.txt
rename to linux-x86/lib64/clang/14.0.6/share/cfi_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt b/linux-x86/lib64/clang/14.0.6/share/dfsan_abilist.txt
similarity index 99%
rename from linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt
rename to linux-x86/lib64/clang/14.0.6/share/dfsan_abilist.txt
index 18e069c..6a3094f 100644
--- a/linux-x86/lib64/clang/14.0.2/share/dfsan_abilist.txt
+++ b/linux-x86/lib64/clang/14.0.6/share/dfsan_abilist.txt
@@ -30,16 +30,26 @@
 fun:dfsan_flush=discard
 fun:dfsan_print_origin_trace=uninstrumented
 fun:dfsan_print_origin_trace=discard
+fun:dfsan_print_origin_id_trace=uninstrumented
+fun:dfsan_print_origin_id_trace=discard
 fun:dfsan_sprint_origin_trace=uninstrumented
 fun:dfsan_sprint_origin_trace=discard
+fun:dfsan_sprint_origin_id_trace=uninstrumented
+fun:dfsan_sprint_origin_id_trace=discard
 fun:dfsan_sprint_stack_trace=uninstrumented
 fun:dfsan_sprint_stack_trace=discard
 fun:dfsan_get_origin=uninstrumented
 fun:dfsan_get_origin=custom
+fun:dfsan_read_origin_of_first_taint=uninstrumented
+fun:dfsan_read_origin_of_first_taint=discard
 fun:dfsan_get_init_origin=uninstrumented
 fun:dfsan_get_init_origin=discard
 fun:dfsan_get_track_origins=uninstrumented
 fun:dfsan_get_track_origins=discard
+fun:dfsan_set_conditional_callback=uninstrumented
+fun:dfsan_set_conditional_callback=discard
+fun:dfsan_get_labels_in_signal_conditional=uninstrumented
+fun:dfsan_get_labels_in_signal_conditional=discard
 
 ###############################################################################
 # glibc
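Each runtime entry point above gets the abilist's usual two-line treatment: uninstrumented marks the body as not instrumented, and discard (or custom, as for dfsan_get_origin) tells DFSan how to model the return value's taint. A sketch of extending the list for user code; my_parse is a made-up function, and the assumption is that extra ABI lists are passed with clang's -fsanitize-ignorelist= flag:

# Hypothetical user entry: treat my_parse as uninstrumented and drop
# any taint from its return value.
fun:my_parse=uninstrumented
fun:my_parse=discard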
diff --git a/linux-x86/lib64/clang/14.0.2/share/hwasan_ignorelist.txt b/linux-x86/lib64/clang/14.0.6/share/hwasan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/hwasan_ignorelist.txt
rename to linux-x86/lib64/clang/14.0.6/share/hwasan_ignorelist.txt
diff --git a/linux-x86/lib64/clang/14.0.2/share/msan_ignorelist.txt b/linux-x86/lib64/clang/14.0.6/share/msan_ignorelist.txt
similarity index 100%
rename from linux-x86/lib64/clang/14.0.2/share/msan_ignorelist.txt
rename to linux-x86/lib64/clang/14.0.6/share/msan_ignorelist.txt
diff --git a/linux-x86/lib64/libbase.so b/linux-x86/lib64/libbase.so
index a81fa45..0ed3b6b 100755
--- a/linux-x86/lib64/libbase.so
+++ b/linux-x86/lib64/libbase.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so b/linux-x86/lib64/libc++.so
index 17b14c9..e22c3b1 100755
--- a/linux-x86/lib64/libc++.so
+++ b/linux-x86/lib64/libc++.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so.1 b/linux-x86/lib64/libc++.so.1
index a653f15..24340d2 100644
--- a/linux-x86/lib64/libc++.so.1
+++ b/linux-x86/lib64/libc++.so.1
Binary files differ
diff --git a/linux-x86/lib64/libclang-cpp.so.14git b/linux-x86/lib64/libclang-cpp.so.14git
index 0873573..a0725ad 100644
--- a/linux-x86/lib64/libclang-cpp.so.14git
+++ b/linux-x86/lib64/libclang-cpp.so.14git
Binary files differ
diff --git a/linux-x86/lib64/liblog.so b/linux-x86/lib64/liblog.so
index eaf85b2..8c0f306 100755
--- a/linux-x86/lib64/liblog.so
+++ b/linux-x86/lib64/liblog.so
Binary files differ
diff --git a/linux-x86/lib64/libprotobuf-cpp-full.so b/linux-x86/lib64/libprotobuf-cpp-full.so
index 3b5b9ba..afe9ea3 100755
--- a/linux-x86/lib64/libprotobuf-cpp-full.so
+++ b/linux-x86/lib64/libprotobuf-cpp-full.so
Binary files differ
diff --git a/linux-x86/lib64/libz-host.so b/linux-x86/lib64/libz-host.so
index ea62227..250e777 100755
--- a/linux-x86/lib64/libz-host.so
+++ b/linux-x86/lib64/libz-host.so
Binary files differ
diff --git a/linux-x86/lib64/libziparchive.so b/linux-x86/lib64/libziparchive.so
index 8f9735c..bc356b1 100755
--- a/linux-x86/lib64/libziparchive.so
+++ b/linux-x86/lib64/libziparchive.so
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index 597da4c..88be875 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -5,77 +5,77 @@
 
   <default revision="master" remote="aosp" sync-j="4" />
 
-  <project path="build/make" name="platform/build" revision="3f852994b489372f8ce22bfe011e21241f85a1dc">
+  <project path="build/make" name="platform/build" revision="9261a656ca249eba5195f06b3f753f14b9ca0ca3">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
-  <project path="build/blueprint" name="platform/build/blueprint" revision="5860caea339440f1040ee841c4e6bcfb4fd286ea" />
+  <project path="build/blueprint" name="platform/build/blueprint" revision="513e27a6a6702f3bcb218bd69e50b02af4f56772" />
 
   <project path="build/kati" name="platform/build/kati" revision="a6fd9e68b5a25b8c5726e2f653f5af47ef9087bf" />
 
-  <project path="build/soong" name="platform/build/soong" revision="62192b883b344f346359065cf1ca31a8e3e28a4e">
+  <project path="build/soong" name="platform/build/soong" revision="8cab5906da1a572c6499fb5a305046fc0a6630b0">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
 </project>
 
-  <project path="external/golang-protobuf" name="platform/external/golang-protobuf" revision="3d215d4dc1c4ea8ff8ea484faf3420b48a75d98f" />
+  <project path="external/golang-protobuf" name="platform/external/golang-protobuf" revision="c26e56ffbed22acce9c2cc14ed164bf4f66f9c4a" />
 
-  <project path="prebuilts/build-tools" name="platform/prebuilts/build-tools" clone-depth="1" revision="4cd39c7c1e5d3df1ae1d8f8d240b0a58e18009c1" />
+  <project path="prebuilts/build-tools" name="platform/prebuilts/build-tools" clone-depth="1" revision="0c6759617a9fe14cd65014cf94d582eb11f6816e" />
 
-  <project path="prebuilts/clang-tools" name="platform/prebuilts/clang-tools" clone-depth="3" revision="42cd22c7183b98020341773430c858b064f7177a" />
+  <project path="prebuilts/clang-tools" name="platform/prebuilts/clang-tools" clone-depth="3" revision="8be4a2386eef2e2337c654ace5ceeb1d4d9c4615" />
 
-  <project path="prebuilts/clang/host/linux-x86" name="platform/prebuilts/clang/host/linux-x86" groups="linux" clone-depth="1" revision="7082f5c77f7e1aad188674c09b029eb6d2a10fa7" />
+  <project path="prebuilts/clang/host/linux-x86" name="platform/prebuilts/clang/host/linux-x86" groups="linux" clone-depth="1" revision="bf66969572c8462b88e6a1f34b339ffc60380c35" />
 
   <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" clone-depth="1" revision="e089f0d72820a43be332be964643b83a32e4b1a7" />
 
-  <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" clone-depth="1" revision="d38e3ac65e3c32bfdd55d5caad5e13d084e9f30e" />
+  <project path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" clone-depth="1" revision="c2f3c6976a1b064810179e70351b67fa577fbc80" />
 
-  <project path="prebuilts/go/linux-x86" name="platform/prebuilts/go/linux-x86" groups="linux" clone-depth="1" revision="59ee7806ffe03f6860951ad60e5fc94ae6292b12" />
+  <project path="prebuilts/go/linux-x86" name="platform/prebuilts/go/linux-x86" groups="linux" clone-depth="1" revision="e0d0a021ccdfc5702b43f7b1fd74226f75bf61cf" />
 
   <project path="prebuilts/ninja/linux-x86" name="platform/prebuilts/ninja/linux-x86" groups="linux" clone-depth="1" revision="8a10824f74fe0e22af9bf314a837f5b70e2bb67f" />
 
-  <project path="prebuilts/rust" name="platform/prebuilts/rust" clone-depth="1" revision="74f785433c788f2e604c13475e2c66ec05a3599c" />
+  <project path="prebuilts/rust" name="platform/prebuilts/rust" clone-depth="1" revision="8d72891d8b7777368d35b906ec4131479ea54d96" />
 
-  <project path="prebuilts/clang/host/darwin-x86" name="platform/prebuilts/clang/host/darwin-x86" groups="darwin" clone-depth="1" revision="5b25d50ef4f64a15040f423befc064461cc42a0a" />
+  <project path="prebuilts/clang/host/darwin-x86" name="platform/prebuilts/clang/host/darwin-x86" groups="darwin" clone-depth="1" revision="888598fdad8cbfc34706b6201fe7dbf28bc85ff9" />
 
   <project path="prebuilts/gcc/darwin-x86/host/headers" name="platform/prebuilts/gcc/darwin-x86/host/headers" groups="darwin" clone-depth="1" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" />
 
   <project path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" groups="darwin" clone-depth="1" revision="e4ca4c920fa61a3277febc4be9ad69fc221cc92a" />
 
-  <project path="prebuilts/go/darwin-x86" name="platform/prebuilts/go/darwin-x86" groups="darwin" clone-depth="1" revision="bc60c3cc0809f4fcd1dae67e1e4677d33ff252eb" />
+  <project path="prebuilts/go/darwin-x86" name="platform/prebuilts/go/darwin-x86" groups="darwin" clone-depth="1" revision="abed8864c12ae2506c6cc9c69ce650d3b677b187" />
 
   <project path="prebuilts/ninja/darwin-x86" name="platform/prebuilts/ninja/darwin-x86" groups="darwin" clone-depth="1" revision="f321e197944c19d273cec788b9a3e8ca94331248" />
 
-  <project path="prebuilts/jdk/jdk11" name="platform/prebuilts/jdk/jdk11" clone-depth="1" revision="03dd391c3b4cc25d59ae3e87836777e1e1a69766" />
+  <project path="prebuilts/jdk/jdk11" name="platform/prebuilts/jdk/jdk11" clone-depth="1" revision="01125e0674e08a209052e8b320ef2f59da32fe30" />
 
   <project path="prebuilts/jdk/jdk8" name="platform/prebuilts/jdk/jdk8" clone-depth="1" revision="74e4f1844dfa9b8df9e0fe2ff34a2ecc24d52b07" />
 
   <project path="prebuilts/jdk/jdk9" name="platform/prebuilts/jdk/jdk9" clone-depth="1" revision="1f0b937930e83b0f7470b9555ad289153072882f" />
 
-  <project path="prebuilts/misc" name="platform/prebuilts/misc" clone-depth="1" revision="f8343324cfd7122c8622037400a1d7981c559f29" />
+  <project path="prebuilts/misc" name="platform/prebuilts/misc" clone-depth="1" revision="51dd71ec2ed73543128e15de1670a40fee4ba067" />
 
-  <project path="bionic" name="platform/bionic" revision="34da4b5842bffa11f5e8e6b7a2eb5e19d8e82ccc" />
+  <project path="bionic" name="platform/bionic" revision="c00d06867a7a27d8a610f81b0409a20fcc50db69" />
 
-  <project path="development" name="platform/development" revision="d950d19bd4b5f18df26a2557f7d947c3fdd65735" />
+  <project path="development" name="platform/development" revision="2056001c65fcb418682d6c93875f502428f1e086" />
 
-  <project path="external/abseil-cpp" name="platform/external/abseil-cpp" revision="b2e637834e467cde5d914c3fee2f230919717c4e" />
+  <project path="external/abseil-cpp" name="platform/external/abseil-cpp" revision="71a68520a63a5bae1b6bee6f0b79184ffa1aa99f" />
 
-  <project path="external/boringssl" name="platform/external/boringssl" groups="pdk" revision="1b7cc87c790e88ecee0f6b5e314cc849336ac4fc" />
+  <project path="external/boringssl" name="platform/external/boringssl" groups="pdk" revision="b9e46fc97b5fcb64f753fdba8e87cc50a819bae0" />
 
-  <project path="external/clang" name="platform/external/clang" revision="11addfade52915a076f879e6ee181d6f863138de" />
+  <project path="external/clang" name="platform/external/clang" revision="b8e8dddd89baa1319d537fe4d820954f1b32ddbe" />
 
-  <project path="external/compiler-rt" name="platform/external/compiler-rt" revision="c4b55a3176985b7f286984ab1972f9efbf25a5ac" />
+  <project path="external/compiler-rt" name="platform/external/compiler-rt" revision="b429e93fbf939d13875665f7666c36dde342371d" />
 
-  <project path="external/fmtlib" name="platform/external/fmtlib" revision="ff95f5436627c3640f88049175d3fe0d0937305d" />
+  <project path="external/fmtlib" name="platform/external/fmtlib" revision="79aef5400b9ed37c08e679cae9fcbad424442a03" />
 
-  <project path="external/gflags" name="platform/external/gflags" groups="pdk" revision="f932b1878644c2f2c74ca4b8978254099038c719" />
+  <project path="external/gflags" name="platform/external/gflags" groups="pdk" revision="061f68cd158fa658ec0b9b2b989ed55764870047" />
 
-  <project path="external/go-cmp" name="platform/external/go-cmp" revision="435f492b8ec1566bcf5370d37fa8999c8789c2a8" />
+  <project path="external/go-cmp" name="platform/external/go-cmp" revision="8dab502b73fb06a73ff15a638b1525eb2dd673f2" />
 
-  <project path="external/go-creachadair-shell" name="platform/external/go-creachadair-shell" revision="e166c15bebef29f9fbcd12a3037cf6a3b0769012" />
+  <project path="external/go-creachadair-shell" name="platform/external/go-creachadair-shell" revision="3f0ffb2e3d6c03069ddc73614aafc4fdc519b3f2" />
 
-  <project path="external/go-creachadair-stringset" name="platform/external/go-creachadair-stringset" revision="33c87f956e58cfb7a3c699f402cca1585c1bc39c" />
+  <project path="external/go-creachadair-stringset" name="platform/external/go-creachadair-stringset" revision="900e89eb77f53a2224404bb07a99da9a5286e6e6" />
 
   <project path="external/go-etree" name="platform/external/go-etree" revision="7fa46d9c9eb9134443a7fc56a354f07b15fc3a76" />
 
@@ -83,103 +83,103 @@
 
   <project path="external/golang-x-sync" name="platform/external/golang-x-sync" revision="017d6e2373ef77b5861649f777857f07b195f59c" />
 
-  <project path="external/golang-x-sys" name="platform/external/golang-x-sys" revision="b7fed96c1ad0b91e858531accb49d819b48ab618" />
+  <project path="external/golang-x-sys" name="platform/external/golang-x-sys" revision="cabb8ac6ecc5813fd1e67246fc492f92ed31fe06" />
 
-  <project path="external/golang-x-tools" name="platform/external/golang-x-tools" revision="d6d1ab63f7e2d16fb9a1f1d29755d12da90aa0bb" />
+  <project path="external/golang-x-tools" name="platform/external/golang-x-tools" revision="f10932f763d058b0dcb3acfb795c869996fef47b" />
 
-  <project path="external/googletest" name="platform/external/googletest" revision="2c80bd21163728625b3ed234e27cc5c3051c118c" />
+  <project path="external/googletest" name="platform/external/googletest" revision="a8b15f02268cb85e93925b737cd003eb711c0ca4" />
 
-  <project path="external/jsoncpp" name="platform/external/jsoncpp" revision="90de0e337692839a24c8266166e527905ea1dedd" />
+  <project path="external/jsoncpp" name="platform/external/jsoncpp" revision="22d33955e4c2525e659f729c9f02afe09183b107" />
 
-  <project path="external/kythe" name="platform/external/kythe" revision="726c3cbd918db7997008d2d88f2e6318247fc7e1" />
+  <project path="external/kythe" name="platform/external/kythe" revision="8a2bb4d4c8d9d122fe6e2e3373bca43f8a9a45a7" />
 
-  <project path="external/libcxx" name="platform/external/libcxx" revision="df8cb36848886320003feca8487816119ac9cfe5" />
+  <project path="external/libcxx" name="platform/external/libcxx" revision="fe75d5e6ebb70c906ac91ff8d55d4dcfa1d8e100" />
 
-  <project path="external/libcxxabi" name="platform/external/libcxxabi" revision="1c0c59d81658530697d0a4dee184cf5bb2eed502" />
+  <project path="external/libcxxabi" name="platform/external/libcxxabi" revision="9b8c5479211a1a1dc5d35d399f88344fd1f18610" />
 
   <project path="external/libunwind" name="platform/external/libunwind" revision="ec57b05e0acc7838d2a0f75551a18d28f670692e" />
 
   <project path="external/libunwind_llvm" name="platform/external/libunwind_llvm" revision="b0da2a6ab983c43638eea6e70f46d8265cb29d3a" />
 
-  <project path="external/llvm" name="platform/external/llvm" revision="7d0af1d16600352c5a243a0be80210f2a307ba6f" />
+  <project path="external/llvm" name="platform/external/llvm" revision="463426410dccca2ed8e2d8e7ae88bdcf4ba915ea" />
 
-  <project path="external/protobuf" name="platform/external/protobuf" revision="1523bc6d1ad2933d6c6a5ecdc845e24f0cb6f42b" />
+  <project path="external/protobuf" name="platform/external/protobuf" revision="b22fc3be3f37a86a591c20d73cb76a2bd738852f" />
 
-  <project path="external/python/cpython2" name="platform/external/python/cpython2" revision="d9205f3aa700214fb7f435c873a84bd2a1e70373" />
+  <project path="external/python/cpython2" name="platform/external/python/cpython2" revision="f9567734433a32ee2ed7d2a61ad4a1e315e04154" />
 
-  <project path="external/python/cpython3" name="platform/external/python/cpython3" revision="76fcd27dd7894d37034f529b057e3e80b0fb9cd1" />
+  <project path="external/python/cpython3" name="platform/external/python/cpython3" revision="8be82e547ed3008337db8017332ddd6621ab00c9" />
 
   <project path="external/rapidjson" name="platform/external/rapidjson" revision="9fa2a3d9e356a1f42a6184dcf1e0508ddfa9dbfb" />
 
   <project path="external/regex-re2" name="platform/external/regex-re2" revision="84e28962b2c2f357b5daccb460501b169193fafe" />
 
-  <project path="external/starlark-go" name="platform/external/starlark-go" revision="3f012eaf4c5218a4547ed55682358369aadae0be" />
+  <project path="external/starlark-go" name="platform/external/starlark-go" revision="312f9e324bdf0bde540b9a64d05ce0db85180478" />
 
-  <project path="external/zlib" name="platform/external/zlib" revision="9163b0eef1def86a87724506ce439a08d6bf563b" />
+  <project path="external/zlib" name="platform/external/zlib" revision="e3455e1e066be90c1812ccb1ad76f60b6624a118" />
 
-  <project path="external/zopfli" name="platform/external/zopfli" revision="2999cd7058e3a1fd596e9f00b17d35eebcd46879" />
+  <project path="external/zopfli" name="platform/external/zopfli" revision="15fdf31c61251f3e5aa3b188df2770eb153b9484" />
 
-  <project path="system/core" name="platform/system/core" revision="ae0f4d5e4510eff3f54986a7b9252d2b9ded7b66" />
+  <project path="system/core" name="platform/system/core" revision="05ae1f5f10196de7fc8f6fb5b0d8e1d03fe9b94c" />
 
-  <project path="system/libbase" name="platform/system/libbase" revision="542f384c6652ee020977c6583fc3ed814df4fc91" />
+  <project path="system/libbase" name="platform/system/libbase" revision="55f3f005930249a1031e6aa5ccb84e47f5a84c8b" />
 
-  <project path="system/logging" name="platform/system/logging" revision="83985071c8d7cafea915a0bf9e9e62e9a9ffa287" />
+  <project path="system/logging" name="platform/system/logging" revision="b44816aaac2a044fa29d348e21a4f4f32471d09e" />
 
-  <project path="system/libziparchive" name="platform/system/libziparchive" revision="38bcc139f44eb4d9ccfb612c26d91a3ddefc3b00" />
+  <project path="system/libziparchive" name="platform/system/libziparchive" revision="814471fce5d4de29c3717ca26d92bd74712efc1a" />
 
-  <project path="external/rust/crates/aho-corasick" name="platform/external/rust/crates/aho-corasick" revision="d414aacc5142856d6b9ab89c09f47bd2101917aa" />
+  <project path="external/rust/crates/aho-corasick" name="platform/external/rust/crates/aho-corasick" revision="4e0af96c71f9e3cfac7f4662957c94b5bfde6dff" />
 
-  <project path="external/rust/crates/bindgen" name="platform/external/rust/crates/bindgen" revision="245db42fdadbddd76ddd7f3c6bf030b55d61e662" />
+  <project path="external/rust/crates/bindgen" name="platform/external/rust/crates/bindgen" revision="c83eb534c4f95b7e2c1c7582d79ea3f767d9065f" />
 
-  <project path="external/rust/crates/bitflags" name="platform/external/rust/crates/bitflags" revision="3be99c1ed0cc14067a2c2badd1ae931e46b49693" />
+  <project path="external/rust/crates/bitflags" name="platform/external/rust/crates/bitflags" revision="9d9d8d2fce264c9b7fe994a3b06d435abf6709bc" />
 
-  <project path="external/rust/crates/cexpr" name="platform/external/rust/crates/cexpr" revision="d713000e27149aaeb8c18a0171b1c2d81c7db4c6" />
+  <project path="external/rust/crates/cexpr" name="platform/external/rust/crates/cexpr" revision="6e902fa0550a720be5ba581f690337e7a3b1a499" />
 
-  <project path="external/rust/crates/cfg-if" name="platform/external/rust/crates/cfg-if" revision="3a0379aa3da7a72be33132486b87df59254d6354" />
+  <project path="external/rust/crates/cfg-if" name="platform/external/rust/crates/cfg-if" revision="3bcbb81b5b53a1866e92c0e41d7de9a09f5ead95" />
 
-  <project path="external/rust/crates/clang-sys" name="platform/external/rust/crates/clang-sys" revision="9776ef82a5bb62a362aa1b2143996e74716f1fc2" />
+  <project path="external/rust/crates/clang-sys" name="platform/external/rust/crates/clang-sys" revision="ab8217957e81ebdbfea64b9f26790508bc7f8fdf" />
 
-  <project path="external/rust/crates/clap" name="platform/external/rust/crates/clap" revision="d9335dc735699a0d1c54490e8c2b052a90430106" />
+  <project path="external/rust/crates/clap" name="platform/external/rust/crates/clap" revision="03e005d91d9db060140f1b12243c79e59eb598f3" />
 
-  <project path="external/rust/crates/either" name="platform/external/rust/crates/either" revision="88fac7b76f59e6fbf781aa51c4cbc39190bf9237" />
+  <project path="external/rust/crates/either" name="platform/external/rust/crates/either" revision="5d1ca2c6eca40ede3de483ec1278f7e6ea39247a" />
 
-  <project path="external/rust/crates/glob" name="platform/external/rust/crates/glob" revision="6ec2da13f2a49633691a8af4874bd802f10365f5" />
+  <project path="external/rust/crates/glob" name="platform/external/rust/crates/glob" revision="4e1bf22e5891b150c05c110e216490ac05ab7573" />
 
-  <project path="external/rust/crates/lazy_static" name="platform/external/rust/crates/lazy_static" revision="fcce086e4b5164edc5c6acc7dae07d1aecdaa1e0" />
+  <project path="external/rust/crates/lazy_static" name="platform/external/rust/crates/lazy_static" revision="81dee171e4febd990293146eefef3e7fb9c1b452" />
 
-  <project path="external/rust/crates/lazycell" name="platform/external/rust/crates/lazycell" revision="22b0736188f3e445f7c832ae91f4f54aac98275c" />
+  <project path="external/rust/crates/lazycell" name="platform/external/rust/crates/lazycell" revision="89789bb6a2d45d0c42551fcd560397f7fb9e3d99" />
 
-  <project path="external/rust/crates/libc" name="platform/external/rust/crates/libc" revision="3672b8eaaf0eac98447c2408220317bc87ff56c4" />
+  <project path="external/rust/crates/libc" name="platform/external/rust/crates/libc" revision="2f88840fb45bdb5661b0a5a3cb42d35427955b15" />
 
-  <project path="external/rust/crates/libloading" name="platform/external/rust/crates/libloading" revision="46d5ad1c7ec6697c3dad1099dfe4592be4e2e63a" />
+  <project path="external/rust/crates/libloading" name="platform/external/rust/crates/libloading" revision="23887b84923c29db891ba4770e4560e44e4b907d" />
 
-  <project path="external/rust/crates/memchr" name="platform/external/rust/crates/memchr" revision="86c9a8c9b6b91e67029a2b218f9a8e13f45251aa" />
+  <project path="external/rust/crates/memchr" name="platform/external/rust/crates/memchr" revision="dfd7698bba7c230f3a1804277e0c6c5884e6267b" />
 
-  <project path="external/rust/crates/minimal-lexical" name="platform/external/rust/crates/minimal-lexical" revision="b483d0cb23801bdf1422fbbc8f25e6e7c1ebacc0" />
+  <project path="external/rust/crates/minimal-lexical" name="platform/external/rust/crates/minimal-lexical" revision="d09ed23d71ab7a2a9e2a539fd44fe62192c49b3d" />
 
-  <project path="external/rust/crates/nom" name="platform/external/rust/crates/nom" revision="7132b01d1e71be9dbca5a716c8e06ab1f59e0038" />
+  <project path="external/rust/crates/nom" name="platform/external/rust/crates/nom" revision="c9f7acf052e998028864b6a5f7d8f1405d422ee2" />
 
-  <project path="external/rust/crates/peeking_take_while" name="platform/external/rust/crates/peeking_take_while" revision="4f5cf36e0a807b9be215406ad1e9ed8cba463b38" />
+  <project path="external/rust/crates/peeking_take_while" name="platform/external/rust/crates/peeking_take_while" revision="7c6cfb021b5d5f4c95fa3b865faeac2c78ad759e" />
 
-  <project path="external/rust/crates/proc-macro2" name="platform/external/rust/crates/proc-macro2" revision="699150cfbea59d0215e8d927ccb70c542a8a810d" />
+  <project path="external/rust/crates/proc-macro2" name="platform/external/rust/crates/proc-macro2" revision="1401d56e2bb01a28cb43e394311992213d9ff057" />
 
-  <project path="external/rust/crates/quote" name="platform/external/rust/crates/quote" revision="5df1ca640f7d83e36abb6a4841f7c65af76c7e30" />
+  <project path="external/rust/crates/quote" name="platform/external/rust/crates/quote" revision="0d06f4afd70b85932dcb66c0ea64e5f68953db70" />
 
-  <project path="external/rust/crates/regex" name="platform/external/rust/crates/regex" revision="dc2fe9f00916f0ddde60759b272596c61dcfcee0" />
+  <project path="external/rust/crates/regex" name="platform/external/rust/crates/regex" revision="9d43a13537d3aba6c3705ba950804ce54fc3bd80" />
 
-  <project path="external/rust/crates/regex-syntax" name="platform/external/rust/crates/regex-syntax" revision="80a32d81b04de2683243dffdf87688a4ba693899" />
+  <project path="external/rust/crates/regex-syntax" name="platform/external/rust/crates/regex-syntax" revision="d6cbf52cfaf115ab96d28da7181a60c786526266" />
 
-  <project path="external/rust/crates/rustc-hash" name="platform/external/rust/crates/rustc-hash" revision="d1fefa58630a7501ee3a51ae91c001bcd1411c88" />
+  <project path="external/rust/crates/rustc-hash" name="platform/external/rust/crates/rustc-hash" revision="b9e389d8271a8166889feffb77b7f03798b74086" />
 
-  <project path="external/rust/crates/shlex" name="platform/external/rust/crates/shlex" revision="1ed8fff4d4df757513dd524421dba7a543094f8d" />
+  <project path="external/rust/crates/shlex" name="platform/external/rust/crates/shlex" revision="1a27ad38dbd8e83e0972bae75562415c06ed7457" />
 
-  <project path="external/rust/crates/textwrap" name="platform/external/rust/crates/textwrap" revision="b064c049c584f45e25aeccce13c53fc78429f4ee" />
+  <project path="external/rust/crates/textwrap" name="platform/external/rust/crates/textwrap" revision="3ed92fb22f1fcad32b62572be7bd4a2c996ab06e" />
 
-  <project path="external/rust/crates/unicode-xid" name="platform/external/rust/crates/unicode-xid" revision="be7123bc2814780a48a2bf95a1c69496fafeefe2" />
+  <project path="external/rust/crates/unicode-xid" name="platform/external/rust/crates/unicode-xid" revision="7b9720bd7cea029d59adbe6ee2163c2769c199a6" />
 
-  <project path="external/rust/crates/which" name="platform/external/rust/crates/which" revision="4be8607c6294ac1430edb4a2f42563dae5130239" />
+  <project path="external/rust/crates/which" name="platform/external/rust/crates/which" revision="a24c6972f58a35254904fea36d731c54d1f10a26" />
 
-  <project path="dalvik" name="platform/dalvik" revision="373c201450bbdc57fd72e37cac07720eeea4531c" />
+  <project path="dalvik" name="platform/dalvik" revision="90198b99b83ac24023b63f7a14610315737d3698" />
 
-  <project path="external/ninja" name="platform/external/ninja" revision="ed250d061bd8d7682c9f434c58dd3cebeb90f4ec" />
+  <project path="external/ninja" name="platform/external/ninja" revision="246659993ca5e43621f11711d7935223b6d1f295" />
 </manifest>